diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -579,6 +579,26 @@
                      LLVMMatchType<1>]),
                     [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For indexed segment store
+  // Input: (value, pointer, offset, vl)
+  class RISCVISegStore<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
+                                 llvm_anyint_ty]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For indexed segment store with mask
+  // Input: (value, pointer, offset, mask, vl)
+  class RISCVISegStoreMask<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyvector_ty,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 llvm_anyint_ty]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
   }
@@ -701,6 +721,10 @@
     def "int_riscv_" # NAME : RISCVSSegStore<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
   }
+  multiclass RISCVISegStore<int nf> {
+    def "int_riscv_" # NAME : RISCVISegStore<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVISegStoreMask<nf>;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -999,6 +1023,8 @@
     defm vlxseg # nf : RISCVISegLoad<nf>;
     defm vsseg # nf : RISCVUSSegStore<nf>;
     defm vssseg # nf : RISCVSSegStore<nf>;
+    // TODO: In v1.0, it should be vsoxseg.
+    defm vsxseg # nf : RISCVISegStore<nf>;
   }
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -61,6 +61,8 @@
   void selectVLXSEGMask(SDNode *Node, unsigned IntNo);
   void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVSSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
+  void selectVSXSEG(SDNode *Node, unsigned IntNo);
+  void selectVSXSEGMask(SDNode *Node, unsigned IntNo);
 
 // Include the pieces autogenerated from the target description.
 #include "RISCVGenDAGISel.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -359,6 +359,67 @@
   ReplaceNode(Node, Store);
 }
 
+void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumOperands() - 5;
+  EVT VT = Node->getOperand(2)->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+  SDValue Operands[] = {
+      StoreVal,
+      Node->getOperand(2 + NF), // Base pointer.
+      Node->getOperand(3 + NF), // Index.
+      Node->getOperand(4 + NF), // VL.
+      SEW,
+      Node->getOperand(0) // Chain.
+  };
+
+  EVT IndexVT = Node->getOperand(3 + NF)->getValueType(0);
+  RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(IndexLMUL));
+  SDNode *Store =
+      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
+  ReplaceNode(Node, Store);
+}
+
+void RISCVDAGToDAGISel::selectVSXSEGMask(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumOperands() - 6;
+  EVT VT = Node->getOperand(2)->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+  SDValue Operands[] = {
+      StoreVal,
+      Node->getOperand(2 + NF), // Base pointer.
+      Node->getOperand(3 + NF), // Index.
+      Node->getOperand(4 + NF), // Mask.
+      Node->getOperand(5 + NF), // VL.
+      SEW,
+      Node->getOperand(0) // Chain.
+  };
+
+  EVT IndexVT = Node->getOperand(3 + NF)->getValueType(0);
+  RISCVVLMUL IndexLMUL = getLMUL(IndexVT);
+  unsigned IndexScalarSize = IndexVT.getScalarSizeInBits();
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, IndexScalarSize, static_cast<unsigned>(LMUL),
+      static_cast<unsigned>(IndexLMUL));
+  SDNode *Store =
+      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
+  ReplaceNode(Node, Store);
+}
+
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
   if (Node->isMachineOpcode()) {
@@ -587,6 +648,26 @@
       selectVSSEGMask(Node, IntNo, /*IsStrided=*/true);
       return;
     }
+    case Intrinsic::riscv_vsxseg2:
+    case Intrinsic::riscv_vsxseg3:
+    case Intrinsic::riscv_vsxseg4:
+    case Intrinsic::riscv_vsxseg5:
+    case Intrinsic::riscv_vsxseg6:
+    case Intrinsic::riscv_vsxseg7:
+    case Intrinsic::riscv_vsxseg8: {
+      selectVSXSEG(Node, IntNo);
+      return;
+    }
+    case Intrinsic::riscv_vsxseg2_mask:
+    case Intrinsic::riscv_vsxseg3_mask:
+    case Intrinsic::riscv_vsxseg4_mask:
+    case Intrinsic::riscv_vsxseg5_mask:
+    case Intrinsic::riscv_vsxseg6_mask:
+    case Intrinsic::riscv_vsxseg7_mask:
+    case Intrinsic::riscv_vsxseg8_mask: {
+      selectVSXSEGMask(Node, IntNo);
+      return;
+    }
     }
     break;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -443,7 +443,8 @@
                  !subst("VLSSEG", "vlsseg",
                  !subst("VSSEG", "vsseg",
                  !subst("VSSSEG", "vssseg",
-                 !subst("VLXSEG", "vlxseg", Upper)))));
+                 !subst("VLXSEG", "vlxseg",
+                 !subst("VSXSEG", "vsxseg", Upper))))));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -1149,6 +1150,39 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoISegStoreNoMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
+      Pseudo<(outs),
+             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
+                  GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul, LMUL> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoISegStoreMask<VReg ValClass, VReg IdxClass, bits<7> EEW, bits<3> LMUL>:
+      Pseudo<(outs),
+             (ins ValClass:$rd, GPR:$rs1, IdxClass: $index,
+                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul, LMUL> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -1738,6 +1772,27 @@
   }
 }
 
+multiclass VPseudoISegStore {
+  foreach idx_eew = EEWList in {  // EEW for index argument.
+    foreach idx_lmul = MxSet<idx_eew>.m in {  // LMUL for index argument.
+      foreach val_lmul = MxList.m in {  // LMUL for the value.
+        defvar IdxLInfo = idx_lmul.MX;
+        defvar IdxVreg = idx_lmul.vrclass;
+        defvar ValLInfo = val_lmul.MX;
+        let VLMul = val_lmul.value in {
+          foreach nf = NFSet<val_lmul>.L in {
+            defvar ValVreg = SegRegClass<val_lmul, nf>.RC;
+            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo :
+              VPseudoISegStoreNoMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+            def nf # "EI" # idx_eew # "_V_" # IdxLInfo # "_" # ValLInfo # "_MASK" :
+              VPseudoISegStoreMask<ValVreg, IdxVreg, idx_eew, idx_lmul.value>;
+          }
+        }
+      }
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
@@ -2942,6 +2997,8 @@
 defm PseudoVLXSEG : VPseudoISegLoad;
 defm PseudoVSSEG : VPseudoUSSegStore;
 defm PseudoVSSSEG : VPseudoSSegStore;
+// TODO: In v1.0, it should be PseudoVSOXSEG.
+defm PseudoVSXSEG : VPseudoISegStore;
 
 //===----------------------------------------------------------------------===//
 // 8. Vector AMO Operations
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsxseg-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsxseg-rv32.ll
@@ -0,0 +1,573 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare void @llvm.riscv.vsxseg3.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, i32)
+declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 16 x i16>, <vscale x 1 x i1>, i32)
+
+define void @test_vsxseg3_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl) {
+; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsxseg3ei16.v v16, (a0), v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv16i16(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, i32 %vl)
+  ret void
+}
+
+define void @test_vsxseg3_mask_nxv1f64_nxv16i16(<vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsxseg3ei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i16(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 16 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg3.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, i32)
+declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define void @test_vsxseg3_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl) {
+; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v
v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv16i8(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i8(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv16i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv16i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv2i32(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i32(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv2i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv2i32( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv4i16(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i16(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv4i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu 
+; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv4i16( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv32i16(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv32i16(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv32i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv32i16( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv32i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv1i32(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv1i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv8i16(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i16(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv8i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv8i16( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv8i8(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i8(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv8i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv8i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv8i32(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i32(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv8i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv8i32( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv64i8(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv64i8(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv64i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def 
$v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv64i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv64i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv4i8(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i8(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv4i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv4i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv1i16(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv1i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv32i8(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv32i8(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv32i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: 
test_vsxseg3_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv32i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv32i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv2i8(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i8(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv2i8( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv2i8( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv16i32(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i32(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv16i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv16i32( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv2i16(,,, double*, 
, i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i16(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv2i16( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv2i16( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv4i32(,,, double*, , i32) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i32(,,, double*, , , i32) + +define void @test_vsxseg3_nxv1f64_nxv4i32( %val, double* %base, %index, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv4i32( %val, %val, %val, double* %base, %index, i32 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i32 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i32( %val, %val, %val, double* %base, %index, %mask, i32 %vl) + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsxseg-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsxseg-rv64.ll @@ -0,0 +1,108040 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv16i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv16i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed 
$v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv32i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv32i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv32i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv32i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv4i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv4i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv4i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv16i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv16i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, i64 
%vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv1i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv1i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv1i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv1i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv1i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv1i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv8i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv8i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, 
a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv8i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv4i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv4i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv4i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv1i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv1i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv1i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv2i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv2i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv2i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv8i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv8i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv8i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv4i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv4i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv4i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv64i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv64i8(,, i16*, , , 
i64) + +define void @test_vsxseg2_nxv16i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv64i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv64i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv4i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv4i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv4i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv8i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv8i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv8i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv1i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv1i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv1i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv2i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv2i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv2i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv8i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv8i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv8i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: 
vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv32i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv32i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv32i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv32i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv16i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv16i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv2i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv2i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv2i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv16i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i16.nxv2i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv2i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv16i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i16.nxv2i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv16i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv16i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv16i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv16i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv32i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv32i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv4i32.nxv32i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv32i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv4i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv4i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv16i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv16i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv16i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv16i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv1i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv1i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: 
vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv1i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv1i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv1i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv1i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv8i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv8i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv8i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv4i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv4i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg2_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv1i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv1i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv1i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv2i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv2i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv2i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv8i8(,, i32*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv4i32.nxv8i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv8i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv4i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv4i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv64i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv64i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv64i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv64i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void 
+} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv4i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv4i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv8i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv8i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv8i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv1i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv1i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv1i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu 
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv2i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv2i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv2i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv8i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv8i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv8i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv32i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv32i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv32i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv32i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv16i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv16i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv16i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv16i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv2i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv2i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv2i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i32.nxv2i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv2i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv4i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i32.nxv2i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 
killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv16i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv16i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv16i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv16i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv32i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv32i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv32i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv32i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv4i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv4i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv4i32( %val, i32* 
%base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv16i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv16i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv16i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv16i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv1i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv1i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv1i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv1i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv1i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv1i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: 
vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv8i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv8i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv8i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv8i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv4i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv4i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv1i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv1i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv1i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv2i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv2i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv2i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv8i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv8i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv8i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv8i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv4i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv4i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv4i32.nxv64i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv64i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv64i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv64i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv4i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv4i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv8i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv8i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv8i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; 
CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv8i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv1i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv1i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv1i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv2i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv2i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv2i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv8i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv8i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv8i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv8i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv32i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv32i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv32i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv32i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv16i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv16i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv16i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv16i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv2i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv2i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv2i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: 
vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i32.nxv2i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv2i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv4i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i32.nxv2i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv16i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv16i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv32i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv32i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv4i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv4i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv16i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv16i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv1i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv1i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, i64 
%vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv1i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv1i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv8i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv8i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv4i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv4i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv1i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv1i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv2i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv8i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv8i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, 
(a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv4i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv4i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv64i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv64i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv4i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv4i16(,,,, i32*, , , i64) + +define void 
@test_vsxseg4_nxv4i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv8i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv8i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv8i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv8i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv1i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv1i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv2i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv8i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv8i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv32i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv32i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu 
+; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv16i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv16i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv2i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i32.nxv2i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv4i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + 
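For reference, a fully-typed sketch of one declaration/call pair exercised by these tests, written out with the scalable-vector types spelled explicitly (an illustrative aside, not part of the patch; it assumes the usual `nxvNiM` ↔ `<vscale x N x iM>` correspondence in the intrinsic name, where the first suffix is the segment value type and the second is the index type):

declare void @llvm.riscv.vsxseg4.nxv4i32.nxv2i64(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32*, <vscale x 2 x i64>, i64)

; Operands, in order: the four segment values, the base pointer, the index vector, and the VL.
tail call void @llvm.riscv.vsxseg4.nxv4i32.nxv2i64(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)

; The masked variant, as used in the *_mask tests, takes a mask with the same element count as the
; value type between the index and the VL:
declare void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i64(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32*, <vscale x 2 x i64>, <vscale x 4 x i1>, i64)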
+define void @test_vsxseg4_mask_nxv4i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv16i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv16i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv32i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv32i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv32i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv4i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv4i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv16i8.nxv4i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv16i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv16i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv1i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv1i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv1i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv1i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv1i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; 
CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv1i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv8i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv8i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv8i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv4i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv4i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv4i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv1i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv1i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def 
$v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv1i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv2i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv2i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv2i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv8i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv8i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv8i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv4i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv4i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv4i64( %val, i8* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv4i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv64i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv64i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv64i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv4i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv4i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv4i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv8i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv8i64(,, i8*, , 
, i64) + +define void @test_vsxseg2_nxv16i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv8i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv1i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv1i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv1i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv2i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv2i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv2i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv2i8( %val, %val, i8* %base, %index, 
%mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv8i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv8i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv8i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv32i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv32i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv32i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv16i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv16i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret 
void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv2i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv2i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv2i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16i8.nxv2i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv2i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv16i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16i8.nxv2i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv16i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv16i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv16i16( %val, %val, %val, i8* 
%base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv32i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv32i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv32i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv32i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv4i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv4i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv4i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv16i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv16i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv1i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv1i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv1i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv1i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv8i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv8i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv4i8(,,, i8*, , i64) 
+declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv4i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv1i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv1i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv2i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv2i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv8i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv8i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv4i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv4i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv4i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv64i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv64i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv64i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv64i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv4i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv4i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv4i16( 
%val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv8i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv8i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv8i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv1i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv1i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv2i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv2i8(,,, i8*, , , 
i64) + +define void @test_vsxseg3_nxv16i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv8i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv8i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv8i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv32i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv32i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv32i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv16i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv16i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def 
$v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv2i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv2i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv16i8.nxv2i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv2i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv16i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv16i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv16i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv16i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv16i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv16i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv32i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv32i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv4i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv4i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv4i32( 
%val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv16i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv16i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv1i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv1i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv1i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv1i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.mask.nxv16i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv8i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv8i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv4i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv4i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv1i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv1i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv2i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv2i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv8i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv8i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv4i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv4i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, 
(a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv64i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv64i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv4i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv4i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv8i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv8i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv8i64( %val, %val, 
%val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv1i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv1i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv2i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv2i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv8i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv8i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, 
a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv32i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv32i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv16i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv16i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv2i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv2i16(,,,, i8*, , , 
i64) + +define void @test_vsxseg4_nxv16i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv16i8.nxv2i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv2i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv16i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv16i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv16i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv16i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv16i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv16i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv16i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv16i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv16i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv32i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv32i16(,, i64*, , , i64) + +define void 
@test_vsxseg2_nxv1i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv32i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv32i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv4i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv4i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv4i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv16i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv16i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv16i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv16i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv1i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv1i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv1i64( %val, i64* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv1i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv1i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv8i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv8i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv8i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv8i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv4i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv4i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv4i8( %val, i64* %base, %index, i64 %vl) 
{ +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv4i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv1i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv1i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv2i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv2i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv2i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv8i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv8i8(,, i64*, , , i64) + 
+define void @test_vsxseg2_nxv1i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv8i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv8i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv4i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv4i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv4i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv64i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv64i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv64i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv64i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv4i16(,, i64*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv1i64.nxv4i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv4i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv8i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv8i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv8i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv8i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv1i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv1i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv1i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv1i8( %val, %val, 
i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv2i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv2i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv2i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv8i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv8i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv8i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv8i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv32i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv32i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv32i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv32i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv16i32(,, i64*, , i64) 
+declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv16i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv16i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv16i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv2i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv2i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv2i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i64.nxv2i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv2i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv1i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i64.nxv2i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv1i64.nxv16i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv16i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv16i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv16i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv32i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv32i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv32i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv32i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv4i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv4i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv4i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv4i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv16i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv16i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv16i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv16i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv1i64(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv1i64(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv1i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv1i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv1i64.nxv8i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv8i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv8i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv8i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv4i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv4i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv4i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv4i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv1i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv1i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv2i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv2i32(,,, i64*, , , i64) + +define void 
@test_vsxseg3_nxv1i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv2i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv8i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv8i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv8i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv8i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv4i64(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv4i64(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv4i64( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv4i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv64i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv64i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv64i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv64i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv4i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv4i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv4i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv4i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv8i64(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv8i64(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv8i64( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv8i64( %val, %val, %val, i64* 
%base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv1i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv1i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv2i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv2i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv2i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv8i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv8i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv8i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv8i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv32i8(,,, i64*, , 
i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv32i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv32i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv32i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv16i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv16i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv16i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv16i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv2i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv2i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv2i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv1i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i64.nxv2i64(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv2i64(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv1i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i64.nxv2i64( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv16i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv16i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv16i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv16i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv32i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv32i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv32i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def 
$v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv32i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv4i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv4i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv4i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv4i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv16i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv16i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv16i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv16i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv1i64(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv1i64(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg4_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv1i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv1i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv8i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv8i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv8i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv8i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv4i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv4i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv4i8( %val, %val, %val, %val, 
i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv4i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv1i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv1i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv2i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv2i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv8i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv8i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.nxv1i64.nxv8i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv8i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv4i64(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv4i64(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv4i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv4i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv64i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv64i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv64i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv64i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv4i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv4i16(,,,, i64*, , , i64) + +define void 
@test_vsxseg4_nxv1i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv4i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv4i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv8i64(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv8i64(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv8i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv8i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv1i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv1i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv2i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv2i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv8i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv8i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv8i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv8i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv32i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv32i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv32i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv32i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv16i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv16i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv16i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv16i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv2i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv2i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i64.nxv2i64(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv2i64(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv1i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg4.nxv1i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv16i16(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv16i16(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv16i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv16i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv32i16(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv32i16(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv32i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv32i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv4i32(,,,,, i64*, , i64) 
+declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv4i32(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv4i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv4i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv16i8(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv16i8(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv16i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv16i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv1i64(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv1i64(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; 
CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv1i32(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv1i32(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv8i16(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv8i16(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv8i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv8i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv4i8(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv4i8(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv4i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) 
{ +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv4i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv1i16(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv1i16(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv2i32(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv2i32(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv2i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv2i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv8i8(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv8i8(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; 
CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv8i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv8i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv4i64(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv4i64(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv4i64( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv4i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv64i8(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv64i8(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv64i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv64i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 
%vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv4i16(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv4i16(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv4i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv4i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv8i64(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv8i64(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv8i64( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv8i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv1i8(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv1i8(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg5_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv2i8(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv2i8(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv2i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv2i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv8i32(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv8i32(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv8i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv8i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv32i8(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv32i8(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv32i8( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv32i8( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv16i32(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv16i32(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv16i32( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv16i32( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv2i16(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv2i16(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv2i16( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg5.mask.nxv1i64.nxv2i16( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i64.nxv2i64(,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv2i64(,,,,, i64*, , , i64) + +define void @test_vsxseg5_nxv1i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i64.nxv2i64( %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i64.nxv2i64( %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv16i16(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv16i16(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv16i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv16i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv32i16(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv32i16(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv32i16( 
%val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv32i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv4i32(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv4i32(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv4i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv4i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv16i8(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv16i8(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv16i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv16i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg6.nxv1i64.nxv1i64(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv1i64(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv1i32(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv1i32(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv8i16(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv8i16(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv8i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv8i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv4i8(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv4i8(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv4i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv4i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv1i16(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv1i16(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv2i32(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv2i32(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv2i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv2i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv8i8(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv8i8(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv8i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv8i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv4i64(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv4i64(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv4i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv4i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv64i8(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv64i8(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv64i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv64i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv4i16(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv4i16(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv4i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv4i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv8i64(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv8i64(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli 
a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv8i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv8i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv1i8(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv1i8(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv2i8(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv2i8(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv2i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv2i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv8i32(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv8i32(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv8i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv8i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv32i8(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv32i8(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv32i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv32i8( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv16i32(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv16i32(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; 
CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv16i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv16i32( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv2i16(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv2i16(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv2i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv2i16( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i64.nxv2i64(,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv2i64(,,,,,, i64*, , , i64) + +define void @test_vsxseg6_nxv1i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i64.nxv2i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i64.nxv2i64( %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv16i16(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv16i16(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv32i16(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv32i16(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv4i32(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv4i32(,,,,,,, i64*, , , i64) + +define 
void @test_vsxseg7_nxv1i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv16i8(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv16i8(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv1i64(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv1i64(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 
%vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv1i32(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv1i32(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv8i16(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv8i16(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv4i8(,,,,,,, i64*, , i64) +declare void 
@llvm.riscv.vsxseg7.mask.nxv1i64.nxv4i8(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv1i16(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv1i16(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv2i32(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv2i32(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg7_mask_nxv1i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv8i8(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv8i8(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv4i64(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv4i64(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv64i8(,,,,,,, i64*, , 
i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv64i8(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv4i16(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv4i16(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv8i64(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv8i64(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; 
CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv1i8(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv1i8(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv2i8(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv2i8(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv8i32(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv8i32(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv32i8(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv32i8(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv16i32(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv16i32(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv16i32( %val, i64* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv2i16(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv2i16(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i64.nxv2i64(,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv2i64(,,,,,,, i64*, , , i64) + +define void @test_vsxseg7_nxv1i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg7.nxv1i64.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i64.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv16i16(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv16i16(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv32i16(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv32i16(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, 
v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv4i32(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv4i32(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv16i8(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv16i8(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} 
+ +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv1i64(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv1i64(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv1i32(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv1i32(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv8i16(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv8i16(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv4i8(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv4i8(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv1i16(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv1i16(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv2i32(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv2i32(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv8i8(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv8i8(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv4i64(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv4i64(,,,,,,,, i64*, , , i64) + +define 
void @test_vsxseg8_nxv1i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv64i8(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv64i8(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv4i16(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv4i16(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv8i64(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv8i64(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv1i8(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv1i8(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv2i8(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv2i8(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv8i32(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv8i32(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli 
a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv32i8(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv32i8(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv16i32(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv16i32(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + 
+declare void @llvm.riscv.vsxseg8.nxv1i64.nxv2i16(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv2i16(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i64.nxv2i64(,,,,,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv2i64(,,,,,,,, i64*, , , i64) + +define void @test_vsxseg8_nxv1i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i64.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i64.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv16i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv16i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv1i32.nxv16i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv16i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv32i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv32i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv32i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv32i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv4i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv4i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv4i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv16i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv16i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv1i32.nxv16i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv16i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv1i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv1i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv1i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv1i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv8i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv8i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv1i32.nxv8i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv4i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv4i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv4i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv1i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv1i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv2i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv2i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv2i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv8i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv8i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv8i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv4i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv4i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv4i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv64i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv64i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv64i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv64i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv4i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv4i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv4i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv8i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv8i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv8i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv1i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv1i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 
def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv1i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv2i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv2i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv2i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv8i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv8i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv8i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv32i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv32i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, 
v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv32i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv32i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv16i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv16i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv16i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv16i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv2i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv2i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv2i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i32.nxv2i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv2i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv1i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def 
$v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i32.nxv2i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv16i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv16i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv16i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv16i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv32i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv32i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv32i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv32i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv4i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv4i32(,,, i32*, , , i64) + +define void 
@test_vsxseg3_nxv1i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv4i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv16i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv16i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv16i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv16i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv1i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv1i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv1i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv1i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv8i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv8i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv8i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv8i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv4i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv4i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv4i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv1i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv1i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v 
v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv2i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv2i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv2i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv8i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv8i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv8i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv8i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv4i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv4i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv4i64( %val, %val, %val, i32* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv64i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv64i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv64i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv64i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv4i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv4i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv4i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv8i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv8i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: 
vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv8i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv8i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv1i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv1i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv2i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv2i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv2i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv8i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv8i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv8i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv8i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv32i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv32i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv32i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv32i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv16i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv16i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv16i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv16i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv2i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv2i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv2i16( %val, i32* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv2i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i32.nxv2i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv2i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv1i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i32.nxv2i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv16i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv16i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv32i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv32i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv32i16( 
%val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv4i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv4i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv16i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv16i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv1i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv1i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv1i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv1i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv8i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv8i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv4i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv4i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv1i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv1i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv2i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv2i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv8i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv8i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv4i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv4i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv64i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv64i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg4_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv4i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv4i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv8i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv8i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv8i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv8i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv1i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv1i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv2i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv2i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv8i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv8i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv32i8(,,,, i32*, , i64) +declare void 
@llvm.riscv.vsxseg4.mask.nxv1i32.nxv32i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv16i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv16i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv2i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv2i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i32.nxv2i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv2i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv1i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv16i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv16i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv32i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv32i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg5.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv4i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv4i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv16i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv16i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv1i64(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv1i64(,,,,, i32*, , , i64) + +define void 
@test_vsxseg5_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv1i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv1i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv8i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv8i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv4i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv4i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv1i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv1i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv2i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv2i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv8i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv8i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv4i64(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv4i64(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv4i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv4i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv64i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv64i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; 
CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv4i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv4i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv8i64(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv8i64(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv8i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; 
CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv8i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv1i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv1i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv2i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv2i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv8i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv8i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv32i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv32i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv16i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv16i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv2i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv2i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg5_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i32.nxv2i64(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv2i64(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv1i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv16i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv16i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg6.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv32i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv32i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv4i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv4i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv16i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv16i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv1i64(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv1i64(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv1i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv1i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv1i32( %val, %val, 
%val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv8i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv8i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv4i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv4i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv1i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv1i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg6_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv2i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv2i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv8i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv8i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv4i64(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv4i64(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg6_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv4i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv4i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv64i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv64i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv4i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv4i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i64 
%vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv8i64(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv8i64(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv8i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv8i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv1i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv1i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv2i8(,,,,,, 
i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv2i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv8i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv8i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv32i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv32i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv16i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv16i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv2i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv2i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i32.nxv2i64(,,,,,, i32*, , i64) +declare void 
@llvm.riscv.vsxseg6.mask.nxv1i32.nxv2i64(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv1i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv16i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv16i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv32i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv32i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv32i16( %val, %val, %val, 
%val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv4i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv4i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv16i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv16i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv1i64(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv1i64(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv1i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv1i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv8i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv8i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv4i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv4i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv1i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv1i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv2i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv2i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv8i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv8i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv4i64(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv4i64(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv64i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv64i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv4i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv4i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv4i16: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv8i64(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv8i64(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv1i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv1i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv1i8( %val, %val, 
%val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv2i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv2i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv8i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv8i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv32i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv32i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv16i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv16i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv2i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv2i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i32.nxv2i64(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv2i64(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv1i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv16i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv16i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv32i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv32i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv32i16( %val, i32* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv4i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv4i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv16i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv16i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv1i64(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv1i64(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv1i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv1i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, 
v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv8i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv8i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv4i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv4i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv1i16(,,,,,,,, 
i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv1i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv2i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv2i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv8i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv8i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: 
vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv4i64(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv4i64(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv64i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv64i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed 
$v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv4i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv4i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv8i64(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv8i64(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: 
vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv1i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv1i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv2i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv2i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv8i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv8i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv8i32( %val, 
i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv32i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv32i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv16i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv16i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; 
CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv2i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv2i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i32.nxv2i64(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv2i64(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv1i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv16i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv16i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv16i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv32i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv32i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv32i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv32i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv4i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv4i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv4i32( 
%val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv16i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv16i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv16i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv1i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv1i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv1i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv1i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv1i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: 
vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv1i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv8i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv8i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv4i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv4i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv4i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv1i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv1i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # 
kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv1i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv2i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv2i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv2i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv8i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv8i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv4i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv4i64(,, i16*, , , i64) + +define void 
@test_vsxseg2_nxv8i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv4i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv64i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv64i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv64i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv64i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv4i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv4i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv4i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv8i64(,, i16*, , 
i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv8i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv1i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv1i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv1i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i16.nxv2i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv2i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv8i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv2i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: 
+  tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv2i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i32>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv32i8(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg2_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv8i16_nxv32i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv32i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv16i32(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg2_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv8i16_nxv16i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv16i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv2i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i16>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg2_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv1r.v v25, v18
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv8i16_nxv2i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv1r.v v25, v18
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv2i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv8i16.nxv2i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i64>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv2i64(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 2 x i64>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg2_nxv8i16_nxv2i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv8i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v26, v18
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei64.v v16, (a0), v26
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv8i16.nxv2i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv8i16_nxv2i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv8i16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v26, v18
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg2ei64.v v16, (a0), v26, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv8i16.nxv2i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg3.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, i64)
+declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv16i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 16 x i16>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg3_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v0, v16
+; CHECK-NEXT:    vmv2r.v v2, v0
+; CHECK-NEXT:    vmv2r.v v4, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsxseg3ei16.v v0, (a0), v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv16i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg3_mask_nxv8i16_nxv16i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v2, v16
+; CHECK-NEXT:    vmv2r.v v4, v2
+; CHECK-NEXT:    vmv2r.v v6, v2
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:
vsxseg3ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv16i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv32i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv32i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv32i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv32i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv4i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv4i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv4i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv16i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv16i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv16i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v 
v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv16i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv1i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv1i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv1i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv1i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv1i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv1i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv8i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv8i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv4i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv4i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv4i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv1i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv1i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv1i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv2i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv2i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv2i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv8i16.nxv8i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv8i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv4i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv4i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv4i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv64i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv64i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv64i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv64i8( %val, %val, 
%val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv4i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv4i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv4i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv8i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv8i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv1i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv1i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv1i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv2i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv2i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv2i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv8i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv8i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv32i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv32i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv32i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv32i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv8i16.nxv16i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv16i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv16i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv16i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv2i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv2i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv2i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i16.nxv2i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i16.nxv2i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv8i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i16.nxv2i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv8i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv16i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv16i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv32i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv32i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv4i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv4i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv16i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv16i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv1i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv1i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv1i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv1i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg4_mask_nxv8i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv8i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv8i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv4i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv4i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv1i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv1i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv1i16( %val, %val, %val, %val, i16* 
%base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv2i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv2i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv8i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv8i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv4i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv4i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.nxv8i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv64i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv64i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv4i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv4i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv8i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv8i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg4_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv1i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv1i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv2i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv2i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv2i8( 
%val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv8i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv8i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv32i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv32i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv16i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv16i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli 
a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv2i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv2i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i16.nxv2i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv2i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv8i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv16i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv16i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv16i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg2_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv32i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv32i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv32i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv4i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv4i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv16i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv16i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv16i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: 
def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv1i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv1i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv1i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv1i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv1i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv1i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv8i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv8i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv8i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v 
v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv4i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv4i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv1i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv1i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv1i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv2i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv2i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv2i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v 
v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv8i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv8i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv8i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv4i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv4i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv64i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv64i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv64i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv4i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv4i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv8i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv8i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv8i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv1i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv1i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv1i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # 
kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv2i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv2i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv2i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv8i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv8i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv8i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv32i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv32i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv32i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu 
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv16i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv16i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv16i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv2i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv2i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv2i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i8.nxv2i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv2i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv4i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i8.nxv2i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv16i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv16i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv32i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv32i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv32i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv32i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv4i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv4i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv16i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv16i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv1i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv1i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv1i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv1i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv8i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv8i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv4i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv4i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv1i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv1i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv2i32(,,, i8*, , i64) +declare void 
@llvm.riscv.vsxseg3.mask.nxv4i8.nxv2i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv8i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv8i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv4i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv4i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv64i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv64i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { 
+; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv64i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv64i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv4i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv4i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv8i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv8i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv8i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, 
i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv1i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv1i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv2i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv2i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv8i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv8i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv8i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv32i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv32i8(,,, 
i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv32i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv16i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv16i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv16i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i8.nxv2i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv2i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv4i8.nxv2i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv2i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv4i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv16i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv16i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv32i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv32i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: 
vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv4i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv4i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv16i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv16i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv1i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv1i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v 
v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv1i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv1i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv8i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv8i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv4i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv4i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv1i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv1i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv2i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv2i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv8i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv8i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: 
vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv4i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv4i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv64i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv64i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv4i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv4i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv8i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv8i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv1i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv1i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv2i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv2i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv8i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv8i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv32i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv32i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv16i32(,,,, i8*, , i64) +declare void 
@llvm.riscv.vsxseg4.mask.nxv4i8.nxv16i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv2i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv2i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i8.nxv2i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv2i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv4i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: 
vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv16i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv16i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv32i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv32i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv4i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv4i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv16i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv16i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv1i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv1i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv1i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv1i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv8i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv8i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv4i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv4i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv1i16(,,,,, i8*, , i64) +declare void 
@llvm.riscv.vsxseg5.mask.nxv4i8.nxv1i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv2i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv2i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv8i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv8i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv4i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv4i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv64i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv64i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv4i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv4i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv4i16( %val, 
%val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv8i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv8i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv1i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv1i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv2i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv2i8(,,,,, i8*, , , 
i64) + +define void @test_vsxseg5_nxv4i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv8i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv8i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv32i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv32i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg5.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv16i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv16i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv2i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv2i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i8.nxv2i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv2i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv4i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i8.nxv2i64( %val, %val, %val, %val, %val, 
i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv16i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv16i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv32i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv32i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, 
i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv4i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv4i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv16i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv16i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv1i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv1i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv1i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv1i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv8i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv8i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv4i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv4i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv1i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv1i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv2i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv2i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg6.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv8i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv8i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv4i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv4i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv64i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv64i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg6.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv4i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv4i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv8i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv8i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: 
vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv1i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv1i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv2i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv2i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv8i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv8i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv8i32( 
%val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv32i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv32i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv16i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv16i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv16i32( 
%val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv2i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv2i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i8.nxv2i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv2i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv4i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv16i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv16i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg7_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv32i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv32i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv4i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv4i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, 
(a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv16i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv16i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv1i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv1i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv1i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv1i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli 
a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv8i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv8i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv4i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv4i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv1i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv1i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv2i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv2i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv8i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv8i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v 
v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv4i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv4i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv64i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv64i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) 
+; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv4i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv4i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv8i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv8i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv1i8(,,,,,,, i8*, , i64) +declare void 
@llvm.riscv.vsxseg7.mask.nxv4i8.nxv1i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv2i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv2i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv8i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv8i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv8i32( %val, i8* %base, 
%index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv32i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv32i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv16i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv16i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv2i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv2i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i8.nxv2i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv2i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv4i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv16i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv16i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv32i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv32i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv4i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv4i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv4i32( %val, 
i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv16i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv16i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv1i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv1i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, 
%val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv1i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv1i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv8i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv8i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv4i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv4i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; 
CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv1i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv1i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv2i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv2i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv8i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv8i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv4i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv4i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv64i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv64i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv64i8( 
%val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv4i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv4i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv8i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv8i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: 
vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv1i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv1i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv2i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv2i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + 
+define void @test_vsxseg8_mask_nxv4i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv8i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv8i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv32i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv32i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.mask.nxv4i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv16i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv16i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv2i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv2i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv4i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i8.nxv2i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv2i64(,,,,,,,, i8*, , , i64) 
+ +define void @test_vsxseg8_nxv4i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv16i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv16i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv16i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv32i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv32i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv32i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; 
CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv32i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv4i32(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+ ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i16_nxv4i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv4i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv16i8(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
+ ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i16_nxv16i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv16i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+ ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
+ ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+ tail call void
@llvm.riscv.vsxseg2.mask.nxv1i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv1i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv1i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv8i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv8i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv8i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv4i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv4i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv4i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.mask.nxv1i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv1i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv1i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv2i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv2i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv2i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv8i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv8i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv8i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: 
vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv4i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv4i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv4i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv64i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv64i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv64i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv64i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv4i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv4i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv4i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv8i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv8i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv8i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv1i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv1i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv2i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv2i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv2i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: 
def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv8i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv8i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv8i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv32i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv32i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv32i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv32i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv16i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv16i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv16i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # 
kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv2i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv2i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv2i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i16.nxv2i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv2i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv1i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i16.nxv2i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv16i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv16i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv16i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv16i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv32i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv32i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv32i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv32i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv4i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv4i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv4i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv16i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv16i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv16i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv16i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv1i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv1i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv1i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv1i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv8i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv8i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv8i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv4i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv4i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv4i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv1i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv1i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv2i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv2i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv2i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv1i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv8i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv8i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv8i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv4i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv4i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv4i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv64i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv64i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv64i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; 
CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv64i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv4i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv4i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv4i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv8i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv8i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv8i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv1i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv1i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv2i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv2i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv2i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv8i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv8i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv8i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv32i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv32i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv32i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, 
v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv32i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv16i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv16i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv16i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv16i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv2i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv2i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv2i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i16.nxv2i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv2i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv1i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i16.nxv2i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i16_nxv2i64: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv16i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv16i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv32i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv32i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv4i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv4i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv16i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv16i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv1i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv1i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv1i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv1i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv8i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv8i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv4i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv4i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv1i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv1i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv2i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv2i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv8i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv8i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv4i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv4i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv64i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv64i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv4i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv4i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv8i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv8i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv1i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv1i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv2i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv2i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv8i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv8i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv32i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv32i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv16i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv16i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, 
v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv2i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv2i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i16.nxv2i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv2i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv1i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv16i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv16i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv16i16( %val, i16* %base, %index, i64 
%vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv32i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv32i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv4i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv4i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv16i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv16i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv1i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv1i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv1i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv1i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, 
i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv8i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv8i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv4i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv4i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv1i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv1i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv2i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv2i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv8i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv8i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv4i64(,,,,, i16*, , i64) +declare void 
@llvm.riscv.vsxseg5.mask.nxv1i16.nxv4i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv64i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv64i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv4i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv4i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg5_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv8i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv8i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv8i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv8i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv1i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv1i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv2i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv2i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv2i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv8i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv8i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv32i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv32i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret 
void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv16i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv16i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv2i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv2i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i16.nxv2i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv2i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv1i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg5_mask_nxv1i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv16i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv16i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv32i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv32i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void 
+} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv4i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv4i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv16i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv16i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv1i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv1i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv1i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv1i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv8i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv8i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv4i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv4i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv1i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv1i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv2i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv2i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, 
(a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv8i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv8i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv4i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv4i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv64i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv64i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; 
CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv4i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv4i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv8i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv8i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv8i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, 
(a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv8i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv1i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv1i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv2i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv2i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv8i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv8i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv32i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv32i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv16i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv16i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; 
CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv2i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv2i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i16.nxv2i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv2i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv1i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv16i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv16i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli 
a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv32i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv32i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv4i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv4i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv16i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv16i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv1i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv1i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv1i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv1i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv1i32( %val, i16* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv8i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv8i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv4i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv4i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv1i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv1i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv2i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv2i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv8i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv8i8(,,,,,,, i16*, , , i64) + 
+define void @test_vsxseg7_nxv1i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv4i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv4i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv64i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv64i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, 
%val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv4i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv4i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv8i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv8i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; 
CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv1i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv1i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv2i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv2i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv8i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv8i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv8i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv32i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv32i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv16i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv16i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv16i32( %val, i16* 
%base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv2i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv2i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i16.nxv2i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv2i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv1i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, 
%index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv16i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv16i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv32i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv32i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv4i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv4i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv4i32( %val, 
i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv16i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv16i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv1i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv1i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, 
%val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv1i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv1i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv8i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv8i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: 
vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv4i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv4i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv1i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv1i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv2i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv2i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, 
v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv8i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv8i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv4i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv4i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv4i64( %val, i16* %base, %index, 
%mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv64i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv64i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv4i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv4i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: 
vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv8i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv8i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv1i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv1i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv1i8( %val, %val, %val, 
%val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv2i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv2i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv8i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv8i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv32i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv32i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv16i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv16i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv2i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv2i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv2i16( %val, %val, 
%val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i16.nxv2i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv2i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv1i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv16i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv16i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv16i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv16i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv32i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv32i16(,, i32*, , , i64) + +define 
void @test_vsxseg2_nxv2i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv32i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv32i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv4i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv4i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv4i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv16i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv16i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv16i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv16i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv1i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv1i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv1i64( %val, i32* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv1i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv1i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv1i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv1i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv8i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv8i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv8i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv4i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv4i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv4i8( %val, i32* %base, %index, 
i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv2i32_nxv4i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv4i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv1i16(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg2_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv2i32_nxv1i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv1i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv8i8(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
+ +define void @test_vsxseg2_nxv2i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv8i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv4i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv4i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv4i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv64i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv64i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv64i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv64i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv4i16(,, i32*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv2i32.nxv4i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv4i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv8i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv8i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv8i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv1i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv1i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv1i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv1i8( %val, %val, 
i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv2i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv2i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv8i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv8i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv8i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv32i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv32i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv32i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv32i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv16i32(,, i32*, , i64) 
+declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv16i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv16i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv16i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv2i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv2i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i32.nxv2i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv2i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv2i32.nxv16i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv16i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv16i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv16i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv32i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv32i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv32i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv32i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv4i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv4i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv4i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv4i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv16i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv16i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv16i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv16i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv1i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv1i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv1i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv1i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv1i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv1i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv1i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv1i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv2i32.nxv8i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv8i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv8i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv8i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv4i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv4i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv4i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv4i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv1i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv1i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv1i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv1i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv2i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv2i32(,,, i32*, , , i64) + +define void 
@test_vsxseg3_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv2i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv8i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv8i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv8i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv8i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv4i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv4i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv4i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv4i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv64i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv64i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv64i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv64i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv4i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv4i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv4i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv4i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv8i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv8i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv8i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv8i64( %val, %val, %val, i32* 
%base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv1i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv1i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv1i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv1i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv2i8(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv2i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv2i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv8i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv8i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv8i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv8i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv32i8(,,, i32*, , 
i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv32i8(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv32i8( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv32i8( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv16i32(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv16i32(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv16i32( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv16i32( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv2i16(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv2i16(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv2i32.nxv2i16( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i32.nxv2i64(,,, i32*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv2i64(,,, i32*, , , i64) + +define void @test_vsxseg3_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i32.nxv2i64( %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv16i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv16i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv32i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv32i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def 
$v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv4i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv4i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv16i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv16i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv1i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv1i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg4_mask_nxv2i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv1i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv1i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv1i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv8i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv8i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv4i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv4i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv4i8( %val, %val, %val, %val, 
i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv1i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv1i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv2i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv2i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv8i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv8i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.nxv2i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv4i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv4i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv4i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv64i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv64i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv4i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv4i16(,,,, i32*, , , i64) + +define void 
@test_vsxseg4_nxv2i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv4i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv8i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv8i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv8i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv8i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv1i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv1i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv2i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv2i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv8i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv8i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv32i8(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv32i8(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv16i32(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv16i32(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv2i16(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv2i16(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i32.nxv2i64(,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv2i64(,,,, i32*, , , i64) + +define void @test_vsxseg4_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg4.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv16i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv16i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv32i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv32i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv4i32(,,,,, i32*, , i64) 
+declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv4i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv16i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv16i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv1i64(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv1i64(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv1i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv1i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv1i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv8i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv8i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv4i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv4i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) 
{ +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv1i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv1i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv2i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv2i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv8i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv8i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; 
CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv4i64(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv4i64(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv4i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv4i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv64i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv64i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 
%vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv4i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv4i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv8i64(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv8i64(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv8i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv8i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv1i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv1i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg5_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv2i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv2i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv8i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv8i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv32i8(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv32i8(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv16i32(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv16i32(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv2i16(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv2i16(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg5.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i32.nxv2i64(,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv2i64(,,,,, i32*, , , i64) + +define void @test_vsxseg5_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv16i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv16i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv32i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv32i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv32i16( 
%val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv4i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv4i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv16i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv16i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg6.nxv2i32.nxv1i64(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv1i64(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv1i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv1i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv1i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv8i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv8i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv4i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv4i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv1i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv1i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv2i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv2i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv8i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv8i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv4i64(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv4i64(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv4i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv4i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv64i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv64i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv4i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv4i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv8i64(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv8i64(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli 
a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv8i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv8i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv1i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv1i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv2i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv2i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv8i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv8i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv32i8(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv32i8(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv16i32(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv16i32(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; 
CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv2i16(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv2i16(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i32.nxv2i64(,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv2i64(,,,,,, i32*, , , i64) + +define void @test_vsxseg6_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv16i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv16i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv32i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv32i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv4i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv4i32(,,,,,,, i32*, , , i64) + +define 
void @test_vsxseg7_nxv2i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv16i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv16i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv1i64(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv1i64(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv1i64( %val, i32* %base, %index, %mask, i64 
%vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv1i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv1i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv8i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv8i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv4i8(,,,,,,, i32*, , i64) +declare void 
@llvm.riscv.vsxseg7.mask.nxv2i32.nxv4i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv1i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv1i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv2i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv2i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg7_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv8i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv8i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv4i64(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv4i64(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv64i8(,,,,,,, i32*, , 
i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv64i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv4i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv4i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv8i64(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv8i64(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; 
CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv1i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv1i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv2i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv2i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv8i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv8i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv32i8(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv32i8(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv16i32(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv16i32(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv16i32( %val, i32* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv2i16(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv2i16(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i32.nxv2i64(,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv2i64(,,,,,,, i32*, , , i64) + +define void @test_vsxseg7_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg7.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv16i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv16i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv32i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv32i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, 
v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv4i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv4i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv16i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv16i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} 
+ +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv1i64(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv1i64(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv1i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv1i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv8i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv8i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv4i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv4i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv1i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv1i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv2i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv2i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv8i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv8i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv4i64(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv4i64(,,,,,,,, i32*, , , i64) + +define 
void @test_vsxseg8_nxv2i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv64i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv64i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv4i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv4i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv8i64(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv8i64(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv1i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv1i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv2i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv2i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv8i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv8i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli 
a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv32i8(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv32i8(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv16i32(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv16i32(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + 
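; For reference, a minimal fully-typed sketch (not part of the generated
; checks) of the nf=8 indexed segment store intrinsic exercised above, with
; the scalable-vector types written out: value type <vscale x 2 x i32>,
; index type <vscale x 16 x i32>, mask type <vscale x 2 x i1>, XLEN = 64.
; The function name @vsxseg8_typed_sketch is illustrative only.
declare void @llvm.riscv.vsxseg8.nxv2i32.nxv16i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, <vscale x 16 x i32>, i64)
declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv16i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)

define void @vsxseg8_typed_sketch(<vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
entry:
  ; Unmasked form: eight segment fields, base pointer, index vector, VL.
  tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv16i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, i64 %vl)
  ; Masked form: same operands plus the mask operand before VL.
  tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv16i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
  ret void
}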
+declare void @llvm.riscv.vsxseg8.nxv2i32.nxv2i16(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv2i16(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i32.nxv2i64(,,,,,,,, i32*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv2i64(,,,,,,,, i32*, , , i64) + +define void @test_vsxseg8_nxv2i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv16i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv16i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv16i16( 
%val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv32i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv32i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv32i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv4i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv4i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv4i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv16i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv16i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv16i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv1i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv1i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv1i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv1i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv1i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv1i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv8i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv8i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, 
%mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv4i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv4i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv4i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv1i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv1i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv1i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv2i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv2i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv2i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv8i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv8i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv4i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv4i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv4i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv64i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv64i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv64i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv64i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv4i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv4i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv4i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv8i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv8i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv1i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv1i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv1i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv2i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv2i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv2i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv8i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv8i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv32i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv32i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv32i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg2_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv16i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv16i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv16i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv2i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv2i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv2i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i8.nxv2i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv2i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv8i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i8.nxv2i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg2_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv16i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv16i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv32i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv32i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv32i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv32i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv4i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv4i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv4i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} 
+ +define void @test_vsxseg3_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv16i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv16i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv1i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv1i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv1i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv1i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv8i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv8i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv4i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv4i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv1i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv1i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv8i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv2i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv2i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv8i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv8i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv4i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv4i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv4i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv8i8.nxv64i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv64i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv64i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv64i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv4i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv4i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv8i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv8i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli 
a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv1i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv1i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv2i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv2i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv8i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv8i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv8i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv8i32( 
%val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv32i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv32i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv32i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv16i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv16i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv16i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv2i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv2i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), 
v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8i8.nxv2i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv2i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv8i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv16i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv16i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv32i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv32i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def 
$v16_v17_v18_v19
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv32i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv4i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i32>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg4_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv8i8_nxv4i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv4i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv16i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i8>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg4_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv8i8_nxv16i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv16i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv1i64(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i64>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg4_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv8i8_nxv1i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv1i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv1i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 1 x i32>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg4_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv8i8_nxv1i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv1i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i16>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg4_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv4i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 4 x i8>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg4_nxv8i8_nxv4i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv4i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv8i8_nxv4i8( %val, i8* %base, %index,
%mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv1i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv1i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv2i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv2i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv8i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv8i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv8i8( %val, i8* %base, 
%index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv4i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv4i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv64i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv64i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv4i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv4i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv8i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv8i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv1i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv1i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv2i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv2i8(,,,, i8*, , , i64) + 
+define void @test_vsxseg4_nxv8i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv8i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv8i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv32i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv32i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, 
%mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv16i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv16i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv2i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv2i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8i8.nxv2i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv2i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv8i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv16i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv16i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv32i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv32i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv4i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv4i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv16i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv16i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv1i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv1i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv1i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv1i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg5_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv8i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv8i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv4i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv4i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} 
+ +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv1i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv1i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv2i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv2i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv8i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv8i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, 
a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv4i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv4i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv64i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv64i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv4i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv4i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv8i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv8i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv1i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv1i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv2i8(,,,,, i8*, , i64) +declare 
void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv2i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv8i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv8i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv32i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv32i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv16i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv16i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv2i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv2i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv8i8.nxv2i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv2i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv8i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv8i8.nxv2i64( %val, 
%val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv8i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv8i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv16i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv16i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv32i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv32i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* 
%base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv4i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv4i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv16i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv16i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv1i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv1i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv1i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv1i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv8i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv8i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv4i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv4i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: 
vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv1i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv1i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv2i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv2i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv2i32( %val, 
%val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv8i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv8i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv4i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv4i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv64i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv64i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, 
i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv4i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv4i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv8i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv8i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv1i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv1i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv2i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv2i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv8i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv8i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + 
+define void @test_vsxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsxseg6ei32.v v1, (a0), v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv32i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 32 x i8>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg6_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsxseg6ei8.v v0, (a0), v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv32i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg6_mask_nxv8i8_nxv32i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vsxseg6ei8.v v1, (a0), v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv32i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv16i32(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 16 x i32>, <vscale x 8 x i1>, i64)
+
+define void @test_vsxseg6_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsxseg6ei32.v v16, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv16i32(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg6_mask_nxv8i8_nxv16i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv16i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vle32.v v8, (a1)
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vsxseg6ei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv16i32(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+ +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv2i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv2i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv8i8.nxv2i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv2i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv8i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv8i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv8i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv8i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv16i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv16i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv32i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv32i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv4i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv4i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg7.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv16i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv16i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv1i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv1i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv1i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv1i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv8i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv8i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv4i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv4i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv4i8( %val, %val, %val, 
%val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv1i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv1i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv2i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv2i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv8i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv8i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg7.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv4i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv4i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv64i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv64i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, 
v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv4i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv4i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv8i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv8i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv1i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv1i8(,,,,,,, i8*, , , i64) + +define 
void @test_vsxseg7_nxv8i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv2i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv2i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv8i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv8i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv8i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv32i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv32i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv16i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv16i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, 
%val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv2i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv2i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv8i8.nxv2i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv2i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv8i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv8i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv8i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv8i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv16i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv16i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg8.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv32i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv32i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv4i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv4i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv4i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv16i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv16i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv1i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv1i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg8.nxv8i8.nxv1i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv1i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv8i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv8i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv4i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv4i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: 
vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv1i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv1i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv2i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv2i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv8i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv8i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv4i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv4i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv64i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv64i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv64i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv4i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv4i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv8i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv8i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) 
+; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv1i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv1i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv2i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv2i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv8i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv8i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv32i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv32i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void 
+} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv16i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv16i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv2i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv2i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv8i8.nxv2i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv2i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv8i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv8i8_nxv2i64: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv8i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv8i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv8i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv8i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv16i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv16i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv16i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv16i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv32i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv32i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv32i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: 
vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv32i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv4i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv4i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv16i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv16i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv16i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv16i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv1i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv1i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv1i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed 
$v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv1i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv1i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv1i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv8i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv8i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv8i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv8i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv4i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv4i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv4i8( %val, i64* %base, %index, 
%mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv1i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv1i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv1i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv2i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv2i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv2i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv8i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv8i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv4i64.nxv8i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv8i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv4i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv4i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv64i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv64i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv64i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv64i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv4i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv4i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; 
CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv8i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv8i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv8i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv8i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv1i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv1i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv1i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv2i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv2i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv2i8( %val, 
i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv2i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv8i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv8i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv8i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv8i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv32i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv32i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv32i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv32i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg2.nxv4i64.nxv16i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv16i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv16i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv16i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv2i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv2i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv2i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i64.nxv2i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv2i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv4i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i64.nxv2i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; 
CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv4i64.nxv2i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv16i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i16>, <vscale x 4 x i1>, i64)
+
+define void @test_vsxseg2_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv4i16_nxv16i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv16i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv32i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 32 x i16>, <vscale x 4 x i1>, i64)
+
+define void @test_vsxseg2_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv4i16_nxv32i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv32i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 32 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)
+
+define void @test_vsxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v18
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv16i8(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 16 x i8>, <vscale x 4 x i1>, i64)
+
+define void @test_vsxseg2_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v18
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv4i16_nxv16i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv16i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv1i64(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i64>, <vscale x 4 x i1>, i64)
+
+define void @test_vsxseg2_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei64.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv4i16_nxv1i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei64.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv1i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv1i32(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 1 x i32>, <vscale x 4 x i1>, i64)
+
+define void @test_vsxseg2_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv1i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv4i16_nxv1i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v25, v0.t
+; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv8i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv8i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv8i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv4i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv4i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv1i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv1i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv1i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: 
+ tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv2i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv2i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv2i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv8i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv8i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv8i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv4i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv4i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.mask.nxv4i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv64i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv64i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv64i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv64i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv4i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv4i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv8i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv8i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv8i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 
killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv1i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv1i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv1i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv2i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv2i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv2i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv8i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv8i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv8i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv32i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv32i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv32i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv32i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv16i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv16i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv16i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv2i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv2i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv2i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed 
$v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4i16.nxv2i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv2i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv4i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4i16.nxv2i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv16i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv16i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv16i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv16i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv32i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv32i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv32i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv32i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv32i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv4i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv4i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv16i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv16i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv16i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv16i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv1i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv1i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv1i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv1i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv1i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv1i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv8i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv8i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv8i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv4i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv4i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv1i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv1i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv1i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv2i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv2i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv2i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv8i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv8i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv8i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv4i16.nxv4i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv4i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv64i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv64i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv64i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv64i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv4i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv4i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv8i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv8i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv8i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv1i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv1i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv1i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv2i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv2i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv2i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), 
v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv8i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv8i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv8i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv32i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv32i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv32i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv32i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv16i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv16i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv16i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: 
vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv16i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv2i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv2i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv2i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4i16.nxv2i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv2i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv4i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4i16.nxv2i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv16i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv16i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 
killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv32i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv32i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv4i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv4i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv16i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv16i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv1i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv1i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv1i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv1i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv1i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv1i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv8i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv8i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, 
a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv8i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv4i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv4i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv1i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv1i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv1i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv2i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv2i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv2i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv8i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv8i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv4i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv4i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv64i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv64i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv4i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv4i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv8i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv8i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 
+; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv1i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv1i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv2i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv2i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv8i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv8i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv32i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv32i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv16i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv16i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv2i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv2i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4i16.nxv2i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv2i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv4i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv16i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv16i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv32i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv32i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg5_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv4i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv4i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv16i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv16i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, 
v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv1i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv1i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv1i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv1i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv8i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv8i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + 
+define void @test_vsxseg5_mask_nxv4i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv4i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv4i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv1i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv1i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv2i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv2i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, 
v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv8i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv8i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv4i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv4i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv64i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv64i8(,,,,, i16*, , , i64) 
+ +define void @test_vsxseg5_nxv4i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv4i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv4i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv8i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv8i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv8i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv8i64( %val, i16* %base, %index, 
%mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv8i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv1i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv1i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv2i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv2i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv8i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv8i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: 
vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv32i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv32i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv16i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv16i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv2i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv2i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4i16.nxv2i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv2i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv4i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv16i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv16i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv16i16( %val, i16* %base, %index, %mask, i64 
%vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv32i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv32i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv4i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv4i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg6.nxv4i16.nxv16i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv16i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv1i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv1i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv1i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv1i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv8i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv8i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv4i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv4i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv1i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv1i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv2i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv2i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv8i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv8i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv4i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv4i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv64i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv64i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv4i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv4i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, 
v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv8i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv8i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv8i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv8i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv1i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv1i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: 
vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv2i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv2i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv8i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv8i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv32i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv32i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv16i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv16i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv2i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv2i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4i16.nxv2i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv2i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv4i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv16i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv16i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv32i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv32i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; 
CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv4i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv4i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv16i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv16i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv16i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv1i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv1i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv1i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv1i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv8i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv8i16(,,,,,,, i16*, , , i64) + +define void 
@test_vsxseg7_nxv4i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv4i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv4i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv1i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv1i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv2i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv2i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv8i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv8i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv4i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv4i64(,,,,,,, i16*, , , i64) 
+ +define void @test_vsxseg7_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv64i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv64i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv4i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv4i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: 
vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv8i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv8i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv1i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv1i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv2i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv2i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv8i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv8i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv32i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv32i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv32i8: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv16i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv16i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv2i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv2i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv2i16( %val, %val, %val, %val, 
%val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4i16.nxv2i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv2i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv4i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv16i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv16i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.mask.nxv4i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv32i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv32i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv4i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv4i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv16i8(,,,,,,,, i16*, , i64) +declare void 
@llvm.riscv.vsxseg8.mask.nxv4i16.nxv16i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv1i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv1i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv1i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv1i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv8i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv8i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv4i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv4i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; 
CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv1i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv1i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv2i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv2i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv8i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv8i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg8_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv4i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv4i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv64i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv64i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv4i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv4i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv8i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv8i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg8_mask_nxv4i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv1i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv1i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv2i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv2i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv8i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv8i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv32i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv32i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv16i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv16i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv2i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv2i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4i16.nxv2i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv2i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv4i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg8.nxv4i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv16i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv16i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv16i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv32i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv32i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv32i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv4i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv4i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 
def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i8_nxv4i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv4i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i8_nxv16i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv16i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: 
vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv8i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv8i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv8i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv4i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv4i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv4i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv1i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv1i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsxseg2.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv2i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv2i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv2i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv8i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv8i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv8i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv4i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv4i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv1i8.nxv4i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv64i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv64i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv64i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv4i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv4i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv4i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv8i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv8i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: 
vsxseg2ei64.v v16, (a0), v8
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i8_nxv8i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv8i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i8>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 2 x i8>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1i8_nxv2i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT: vmv1r.v v25, v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv2i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv8i32(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 8 x i32>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1i8_nxv8i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: 
vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv8i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv32i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv32i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv32i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv16i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv16i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv16i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv2i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv2i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv2i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1i8.nxv2i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv2i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv1i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1i8.nxv2i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv16i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv16i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv32i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv32i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, 
(a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv32i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv32i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv4i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv4i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv4i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv16i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv16i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv1i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv1i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv1i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv1i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv8i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv8i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv4i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv4i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv1i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv1i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv2i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv2i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv8i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv8i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: 
vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv4i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv4i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv4i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv64i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv64i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv64i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv64i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv4i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv4i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv8i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv8i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv8i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv1i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv1i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv2i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv2i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, 
v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv8i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv8i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv8i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv32i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv32i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv32i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv16i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv16i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv16i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv2i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv2i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1i8.nxv2i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv2i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv1i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv16i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv16i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv32i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv32i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv4i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv4i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv16i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv16i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv1i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv1i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv1i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv1i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv8i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv8i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: 
vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv4i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv4i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv1i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv1i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv2i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv2i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; 
CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv8i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv8i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv4i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv4i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv64i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv64i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; 
CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv4i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv4i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv8i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv8i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv1i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv1i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv2i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv2i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv8i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv8i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu 
+; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv32i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv32i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv16i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv16i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv2i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv2i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + 
ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1i8.nxv2i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv2i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv1i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1i8.nxv2i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv16i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv16i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv32i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv32i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: 
vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv4i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv4i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv16i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv16i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg5.nxv1i8.nxv1i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv1i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv1i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv1i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv8i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv8i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, 
a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv4i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv4i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv1i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv1i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv2i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv2i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg5_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv8i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv8i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv4i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv4i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv64i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv64i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v 
v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv4i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv4i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv8i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv8i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), 
v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv1i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv1i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv2i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv2i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv8i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv8i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv32i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv32i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv16i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv16i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv2i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv2i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1i8.nxv2i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv2i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv1i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv16i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv16i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg6.nxv1i8.nxv32i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv32i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv4i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv4i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv16i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv16i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg6.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv1i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv1i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv1i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv1i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv8i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv8i16(,,,,,, i8*, , , i64) + 
+define void @test_vsxseg6_nxv1i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv4i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv4i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv1i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv1i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: 
vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv2i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv2i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv8i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv8i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv4i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv4i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv64i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv64i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv4i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv4i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv8i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv8i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv1i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv1i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv2i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv2i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, 
v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv8i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv8i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv32i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv32i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg6.nxv1i8.nxv16i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv16i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv2i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv2i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1i8.nxv2i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv2i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv1i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg6.nxv1i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv16i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv16i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv32i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv32i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; 
CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv4i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv4i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv16i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv16i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv1i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv1i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv1i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv1i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv8i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv8i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv4i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv4i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv1i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv1i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv2i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv2i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv8i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv8i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv4i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv4i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: 
vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv64i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv64i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv4i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv4i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv8i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv8i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv1i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv1i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv2i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv2i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, 
%val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv8i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv8i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv32i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv32i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg7.nxv1i8.nxv16i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv16i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv2i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv2i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1i8.nxv2i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv2i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv1i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv16i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv16i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv32i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv32i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv32i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv4i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv4i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv16i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv16i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.mask.nxv1i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv1i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv1i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv1i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv1i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv8i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv8i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv4i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv4i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv1i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv1i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v 
v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv2i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv2i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv8i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv8i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv4i64(,,,,,,,, i8*, , i64) +declare void 
@llvm.riscv.vsxseg8.mask.nxv1i8.nxv4i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv64i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv64i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv4i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv4i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v 
v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv8i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv8i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv1i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv1i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv2i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv2i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv8i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv8i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; 
CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv32i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv32i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv16i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv16i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv2i16(,,,,,,,, 
i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv2i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1i8.nxv2i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv2i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv1i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv16i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv16i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv16i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv32i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv32i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv32i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv4i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv4i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv4i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv16i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv16i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv16i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv1i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv1i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv1i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv1i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv1i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv1i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv8i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv8i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv8i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv8i16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv4i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv4i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv4i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv1i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv1i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv1i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv2i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv2i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv8i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv8i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv8i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv4i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv4i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv4i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv64i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv64i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv64i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, 
e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv4i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv4i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv4i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv8i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv8i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv8i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv1i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv1i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv1i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i64 
%vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv2i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv2i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv8i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv8i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv8i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv32i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv32i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv32i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 
killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv16i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv16i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv16i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv2i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv2i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i8.nxv2i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv2i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: 
def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv16i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv16i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv16i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv16i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv32i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv32i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv32i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv32i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv4i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv4i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv4i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) 
{ +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv4i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv16i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv16i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv16i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv16i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv1i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv1i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv1i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv1i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv1i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv1i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv1i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv1i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv8i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv8i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv8i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv8i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv4i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv4i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv4i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv4i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv1i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv1i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv1i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv1i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void 
+} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv2i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv2i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv2i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv8i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv8i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv8i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv8i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv4i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv4i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv4i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv4i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv64i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv64i8(,,, i8*, , , 
i64) + +define void @test_vsxseg3_nxv2i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv64i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv64i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv4i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv4i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv4i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv4i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv8i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv8i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv8i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv2i8.nxv8i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv1i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv1i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv1i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv1i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv2i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv2i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv2i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv8i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv8i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv8i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv8i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv2i8.nxv32i8(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv32i8(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv32i8( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv32i8( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv16i32(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv16i32(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv16i32( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv16i32( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv2i16(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv2i16(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv2i8.nxv2i16( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i8.nxv2i64(,,, i8*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv2i64(,,, i8*, , , i64) + +define void @test_vsxseg3_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i8.nxv2i64( %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv16i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv16i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv32i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv32i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, 
e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv4i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv4i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv16i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv16i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv1i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv1i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv1i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv1i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv1i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv8i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv8i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv4i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv4i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { 
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv1i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv1i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv2i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv2i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv8i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv8i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, 
i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv4i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv4i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv4i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv64i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv64i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv4i16(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv4i16(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: 
vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv8i64(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv8i64(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv8i64( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv1i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv1i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv2i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv2i8(,,,, i8*, , , i64) + +define void 
@test_vsxseg4_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv8i32(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv8i32(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i8.nxv32i8(,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv32i8(,,,, i8*, , , i64) + +define void @test_vsxseg4_nxv2i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, i8* %base, %index, %mask, i64 
%vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv16i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg4_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv16i32(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i8_nxv16i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv16i32(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg4_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv2i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i64>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg4_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i8.nxv2i64(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg5.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, i64)
+declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv16i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg5_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv16i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg5_mask_nxv2i8_nxv16i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv16i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg5.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, i64)
+declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv32i16(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg5_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv32i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg5_mask_nxv2i8_nxv32i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu
+; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv32i16(<vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, <vscale x 2 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg5.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, i64)
+declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv4i32(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg5_nxv2i8_nxv4i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, 
a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv16i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv16i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv1i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv1i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv1i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv1i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv1i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg5_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv8i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv8i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv4i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv4i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret 
void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv1i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv1i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv2i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv2i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv8i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv8i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv4i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv4i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv4i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv64i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv64i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv4i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv4i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, 
(a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv8i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv8i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv8i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv1i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv1i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg5.nxv2i8.nxv2i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv2i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv8i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv8i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv32i8(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv32i8(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv16i32(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv16i32(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv2i16(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv2i16(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i8.nxv2i64(,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv2i64(,,,,, i8*, , , i64) + +define void @test_vsxseg5_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv16i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv16i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv32i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv32i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv4i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv4i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv16i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv16i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv1i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv1i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg6_mask_nxv2i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv1i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv1i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv1i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv8i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv8i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv4i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv4i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv4i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv1i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv1i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv2i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv2i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, 
(a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv8i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv8i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv4i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv4i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv4i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv64i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv64i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv4i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv4i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv8i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv8i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; 
CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv8i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv1i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv1i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv2i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv2i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv8i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv8i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsxseg6.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv32i8(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv32i8(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv16i32(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv16i32(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv2i16(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv2i16(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i8.nxv2i64(,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv2i64(,,,,,, i8*, , , i64) + +define void @test_vsxseg6_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv16i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv16i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv32i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv32i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv4i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv4i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv16i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv16i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv1i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv1i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv1i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv1i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv8i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv8i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv4i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv4i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v 
v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv1i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv1i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv2i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv2i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv8i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv8i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli 
a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv4i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv4i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv64i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv64i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: 
vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv4i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv4i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv8i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv8i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg7.nxv2i8.nxv1i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv1i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv2i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv2i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv8i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv8i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} 
+ +define void @test_vsxseg7_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv32i8(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv32i8(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv16i32(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv16i32(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: 
vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv2i16(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv2i16(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i8.nxv2i64(,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv2i64(,,,,,,, i8*, , , i64) + +define void @test_vsxseg7_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv16i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv16i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, 
v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv32i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv32i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv4i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv4i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret 
void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv16i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv16i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv1i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv1i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.mask.nxv2i8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv1i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv1i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv8i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv8i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv4i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv4i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv1i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv1i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv2i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv2i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: 
vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv8i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv8i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv4i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv4i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv64i8(,,,,,,,, i8*, , i64) +declare void 
@llvm.riscv.vsxseg8.mask.nxv2i8.nxv64i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv4i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv4i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv8i64(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv8i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv1i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv1i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv2i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv2i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv8i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv8i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv32i8(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv32i8(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; 
CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv16i32(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv16i32(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv2i16(,,,,,,,, i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv2i16(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i8.nxv2i64(,,,,,,,, 
i8*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv2i64(,,,,,,,, i8*, , , i64) + +define void @test_vsxseg8_nxv2i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i8.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv16i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv16i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv16i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv16i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv16i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv16i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv32i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv32i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv32i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv32i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv32i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv32i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv32i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv4i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv4i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv4i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv4i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv4i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv4i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv16i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv16i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv16i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv16i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv16i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv16i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv1i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv1i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv1i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv1i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} 
+ +define void @test_vsxseg2_mask_nxv8i32_nxv1i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv1i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv1i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv1i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv1i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv1i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv1i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv1i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv8i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv8i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv8i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv8i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv8i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv4i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv4i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv4i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v 
v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv4i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv4i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv4i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv1i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv1i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv1i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv1i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv1i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv1i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv2i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv2i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv2i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv2i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv2i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv2i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv8i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv8i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv8i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def 
$v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv8i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv8i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv4i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv4i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv4i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv4i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv4i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv4i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv64i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv64i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv64i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv64i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv64i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv64i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv4i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv4i16(,, i32*, , , i64) + +define void 
@test_vsxseg2_nxv8i32_nxv4i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv4i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv4i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv4i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv8i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv8i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv8i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv8i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv8i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv1i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv1i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv1i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv1i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv1i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv1i8( %val, %val, i32* %base, 
%index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv2i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv2i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv2i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv2i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv2i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv2i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv8i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv8i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv8i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv8i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv8i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv32i8(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv32i8(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv32i8( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv32i8( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv32i8( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: 
vsxseg2ei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv32i8( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv16i32(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv16i32(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv16i32( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv16i32( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv16i32( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv16i32( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv2i16(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv2i16(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv2i16( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv2i16( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv2i16( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv2i16( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8i32.nxv2i64(,, i32*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv2i64(,, i32*, , , i64) + +define void @test_vsxseg2_nxv8i32_nxv2i64( %val, i32* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8i32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8i32.nxv2i64( %val, %val, i32* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8i32_nxv2i64( %val, i32* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8i32_nxv2i64: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8i32.nxv2i64( %val, %val, i32* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv16i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv16i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv16i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv16i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv16i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv16i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv32i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv32i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv32i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv32i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv32i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv4i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv4i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv4i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv4i32( %val, %val, i8* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv4i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv4i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv16i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv16i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv16i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv16i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv16i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv16i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv1i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv1i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv1i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv1i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv1i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv1i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv1i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv1i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv1i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: 
vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv1i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv1i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv1i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv8i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv8i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv8i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv8i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv8i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv8i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv4i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv4i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv4i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv4i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv4i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv4i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv1i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv1i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv1i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; 
CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv1i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv1i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv1i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv2i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv2i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv2i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv2i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv2i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv2i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv8i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv8i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv8i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv8i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv8i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv8i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv4i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv4i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv4i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg2_nxv32i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv4i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv4i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv4i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv64i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv64i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv64i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv64i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv64i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv64i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv4i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv4i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv4i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv4i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv4i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv4i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv8i64(,, i8*, , 
i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv8i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv8i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv8i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv8i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv8i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv1i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv1i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv1i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv1i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv1i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv1i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv2i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv2i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv2i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv2i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv2i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.mask.nxv32i8.nxv2i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv8i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv8i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv8i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv8i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv8i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv8i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv32i8(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv32i8(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv32i8( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv32i8( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv32i8( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv16i32(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv16i32(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv16i32( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv16i32( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv16i32( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 
def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv16i32( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv2i16(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv2i16(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv2i16( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv2i16( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv2i16( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv2i16( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv32i8.nxv2i64(,, i8*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv2i64(,, i8*, , , i64) + +define void @test_vsxseg2_nxv32i8_nxv2i64( %val, i8* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv32i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv32i8.nxv2i64( %val, %val, i8* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv32i8_nxv2i64( %val, i8* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv32i8_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv32i8.nxv2i64( %val, %val, i8* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv16i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv16i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv16i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # 
kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv16i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv32i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv32i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv32i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv32i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv4i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv4i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv4i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv4i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv16i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv16i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv16i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def 
$v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv16i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv1i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv1i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv1i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv1i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv1i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv1i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv1i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv1i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv8i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv8i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv8i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 
+; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv8i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv4i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv4i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv4i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv4i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv1i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv1i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv1i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv1i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv2i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv2i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def 
$v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv2i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv8i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv8i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv8i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv8i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv4i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv4i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv4i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv4i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv64i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv64i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv64i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv64i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv4i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv4i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv4i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv4i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv8i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv8i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv8i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv8i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv1i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv1i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv1i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv1i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv2i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv2i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv2i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv8i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv8i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv8i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv8i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv32i8(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv32i8(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv32i8( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, 
i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv32i8( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv16i32(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv16i32(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv16i32( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv16i32( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv2i16(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv2i16(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv2i16( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i16.nxv2i64(,, i16*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv2i64(,, i16*, , , i64) + +define void @test_vsxseg2_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i16.nxv2i64( %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv16i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv16i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv16i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv16i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv32i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv32i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv32i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv32i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv4i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv4i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv4i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv4i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv16i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv16i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv16i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv16i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv1i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv1i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv1i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv1i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv1i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv1i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv1i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg3_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv1i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv8i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv8i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv8i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv8i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv4i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv4i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv4i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv4i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv1i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv1i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv1i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv1i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv2i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv2i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv2i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv8i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv8i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv8i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv8i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv4i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv4i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv4i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; 
CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv4i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv64i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv64i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv64i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv64i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv4i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv4i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv4i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv4i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv8i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv8i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv8i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv8i64( %val, i16* %base, %index, %mask, i64 
%vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv8i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv1i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv1i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv1i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv1i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv2i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv2i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv2i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv8i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv8i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv8i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv8i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv32i8(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv32i8(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv32i8( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv32i8( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv16i32(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv16i32(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv16i32( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv16i32( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv2i16(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv2i16(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv2i16( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i16.nxv2i64(,,, i16*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv2i64(,,, i16*, , , i64) + +define void @test_vsxseg3_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i16.nxv2i64( %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv16i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv16i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv32i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv32i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, 
v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv4i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv4i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv16i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv16i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv1i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv1i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv1i64( %val, 
i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei64.v v0, (a0), v17
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv1i64(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i16_nxv1i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei64.v v1, (a0), v17, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv1i64(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv1i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i32>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg4_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei32.v v0, (a0), v17
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv1i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i16_nxv1i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei32.v v1, (a0), v17, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv1i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv8i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg4_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei16.v v0, (a0), v18
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv8i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i16_nxv8i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei16.v v1, (a0), v18, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv8i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv4i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i8>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg4_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei8.v v0, (a0), v17
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv4i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i16_nxv4i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei8.v v1, (a0), v17, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv4i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv1i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg4_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei16.v v0, (a0), v17
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv1i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i16_nxv1i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei16.v v1, (a0), v17, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg4_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei32.v v0, (a0), v17
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv2i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsxseg4ei32.v v1, (a0), v17, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg4.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
+declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv8i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv8i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv4i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv4i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv4i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv64i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv64i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: 
vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv4i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv4i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv8i64(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv8i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv8i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv1i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv1i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv1i8( %val, %val, 
%val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv2i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv2i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv8i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv8i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv8i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv32i8(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv32i8(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; 
CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv32i8( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv16i32(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv16i32(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv2i16(,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv2i16(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i16.nxv2i64(,,,, i16*, , i64) 
+declare void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv2i64(,,,, i16*, , , i64) + +define void @test_vsxseg4_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv16i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv16i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv32i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv32i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv4i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv4i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv16i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv16i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv1i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv1i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv1i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv1i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv1i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv8i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv8i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv4i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv4i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv4i8: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv1i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv1i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv2i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv2i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret 
void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv8i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv8i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv4i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv4i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv4i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv64i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv64i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv4i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv4i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv8i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv8i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv8i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv8i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv1i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv1i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv1i8( %val, i16* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv2i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv2i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv8i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv8i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv8i32( %val, %val, 
%val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv32i8(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv32i8(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv16i32(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv16i32(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv2i16(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv2i16(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2i16.nxv2i64(,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv2i64(,,,,, i16*, , , i64) + +define void @test_vsxseg5_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv16i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv16i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv32i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv32i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv32i16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv4i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv4i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv16i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv16i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv1i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv1i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv1i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv1i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv1i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv8i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv8i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, 
v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv4i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv4i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv1i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv1i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t 
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv1i16(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i32>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg6_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv2i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv8i8(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 8 x i8>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg6_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv8i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg6_mask_nxv2i16_nxv8i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv8i8(<vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, <vscale x 2 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv4i64(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 4 x i64>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg6_nxv2i16_nxv4i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv4i64( %val, %val, %val, %val,
%val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv4i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv64i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv64i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv4i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv4i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg6.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv8i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv8i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv8i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv8i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv1i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv1i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv2i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv2i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv8i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv8i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv32i8(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv32i8(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, i16* 
%base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv16i32(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv16i32(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv2i16(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv2i16(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2i16.nxv2i64(,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv2i64(,,,,,, i16*, , , i64) + +define void @test_vsxseg6_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv16i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv16i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv32i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv32i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu 
+; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv4i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv4i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv16i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv16i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv16i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv1i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv1i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv1i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv1i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv8i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv8i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv4i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv4i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv1i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv1i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv2i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv2i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv2i32( %val, i16* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv8i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv8i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv4i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv4i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv4i64: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv64i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv64i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv4i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv4i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv4i16( %val, 
%val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv8i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv8i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv1i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv1i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv2i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv2i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv8i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv8i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv32i8(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv32i8(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv16i32(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv16i32(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2i16.nxv2i16(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv2i16(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg7.nxv2i16.nxv2i64(,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv2i64(,,,,,,, i16*, , , i64) + +define void @test_vsxseg7_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv16i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv16i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv16i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv16i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv32i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv32i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv32i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, 
v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv32i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv4i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv4i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv4i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv4i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv16i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv16i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv16i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv16i8( %val, i16* %base, 
%index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv1i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv1i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv1i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv1i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv1i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv1i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv1i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv1i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv1i32( %val, %val, %val, 
%val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv8i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv8i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv8i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv8i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv4i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv4i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv4i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv4i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv1i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv1i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv1i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv1i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv2i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv2i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv2i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv2i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv8i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv8i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv8i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv8i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: 
vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv4i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv4i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv4i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv4i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv64i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv64i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv64i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv64i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v 
v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv4i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv4i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv4i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv4i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv8i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv8i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv8i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv8i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg8.nxv2i16.nxv1i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv1i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv1i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv1i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv2i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv2i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv2i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv2i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv8i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv8i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv8i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv8i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv32i8(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv32i8(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv32i8( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv32i8( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv16i32(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv16i32(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv16i32( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv16i32( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv16i32: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv2i16(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv2i16(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv2i16( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv2i16( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2i16.nxv2i64(,,,,,,,, i16*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2i16.nxv2i64(,,,,,,,, i16*, , , i64) + +define void @test_vsxseg8_nxv2i16_nxv2i64( %val, i16* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2i16_nxv2i64( %val, i16* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2i16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.mask.nxv2i16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv16i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv16i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv16i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv16i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv32i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv32i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv32i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv32i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv4i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv4i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv4i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: 
vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv4i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv16i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv16i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv16i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv16i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv1i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv1i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv1i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv1i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv1i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv8i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv8i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv8i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv8i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv4i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv4i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv4i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv4i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv1i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv1i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + 
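; For reference, a fully-typed sketch of the llvm.riscv.vsxseg2.nxv2i64.nxv1i16 pair used in the
; tests above and below, with the scalable-vector types written out as the mangled intrinsic name
; implies. This is an illustrative reconstruction only, not part of the patch; @typed_example is a
; placeholder function name.
declare void @llvm.riscv.vsxseg2.nxv2i64.nxv1i16(<vscale x 2 x i64>, <vscale x 2 x i64>, i64*, <vscale x 1 x i16>, i64)
declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i16(<vscale x 2 x i64>, <vscale x 2 x i64>, i64*, <vscale x 1 x i16>, <vscale x 2 x i1>, i64)

define void @typed_example(<vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
entry:
  ; Two copies of %val supply the two fields of the segment; the vl operand comes last.
  tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv1i16(<vscale x 2 x i64> %val, <vscale x 2 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl)
  ret void
}
; The masked form inserts a <vscale x 2 x i1> mask operand (same element count as the stored value
; type) immediately before the vl operand, as in the _mask test that follows.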
+define void @test_vsxseg2_mask_nxv2i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv2i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv2i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv2i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv8i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv8i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv8i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv8i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv4i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv4i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv2i64.nxv4i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv4i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv64i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv64i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv64i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv64i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv4i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv4i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv4i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv4i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv8i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv8i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def 
$v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv8i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv8i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv1i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv1i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv1i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv2i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv2i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv2i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv8i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv8i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg2_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv8i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv8i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv32i8(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv32i8(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv32i8( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv32i8( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv16i32(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv16i32(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv16i32( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv16i32( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv2i16(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv2i16(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg2_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv2i16( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2i64.nxv2i64(,, i64*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv2i64(,, i64*, , , i64) + +define void @test_vsxseg2_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2i64.nxv2i64( %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv16i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv16i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv16i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv16i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv32i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv32i16(,,, i64*, , , i64) + +define void 
@test_vsxseg3_nxv2i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv32i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv32i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv4i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv4i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv4i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv4i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv16i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv16i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv16i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv16i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv1i64(,,, i64*, , i64) 
+declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv1i64(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv1i64( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv1i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv1i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv1i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv1i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv1i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv8i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv8i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv8i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv8i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv4i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv4i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg3_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv4i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv4i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv1i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv1i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv1i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv1i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv2i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv2i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv2i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv8i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv8i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv8i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv8i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv4i64(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv4i64(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv4i64( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv4i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv64i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv64i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv64i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv64i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv4i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv4i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv4i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv4i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv8i64(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv8i64(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv8i64( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv8i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv1i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv1i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv1i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv1i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv2i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv2i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv2i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv8i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv8i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv8i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv8i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv32i8(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv32i8(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv32i8( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv32i8( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv16i32(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv16i32(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, 
e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv16i32( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv16i32( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv2i16(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv2i16(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv2i16( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2i64.nxv2i64(,,, i64*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv2i64(,,, i64*, , , i64) + +define void @test_vsxseg3_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2i64.nxv2i64( %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv16i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv16i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv16i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv16i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv16i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv16i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv32i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv32i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv32i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv32i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv32i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv32i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv4i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv4i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv4i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv4i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv4i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv4i32( %val, %val, %val, %val, i64* %base, %index, 
%mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv16i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv16i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv16i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv16i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv16i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv16i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv1i64(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv1i64(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv1i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv1i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv1i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv1i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv1i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv1i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv1i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.mask.nxv2i64.nxv1i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv8i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv8i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv8i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv8i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv8i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv8i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv4i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv4i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv4i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv4i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv4i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv4i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv1i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv1i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv1i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv1i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), 
v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv1i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv2i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv2i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv2i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv2i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv2i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv8i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv8i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv8i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv8i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv8i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv8i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv4i64(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv4i64(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv4i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv4i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv4i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, 
a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv4i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv64i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv64i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv64i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv64i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv64i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv64i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv4i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv4i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv4i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv4i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv4i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv4i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv8i64(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv8i64(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv8i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv8i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv8i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv8i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv1i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv1i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv1i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv1i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv1i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv2i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv2i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv2i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv2i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv2i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv8i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv8i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv8i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v 
v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv8i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv8i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv8i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv32i8(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv32i8(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv32i8( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv32i8( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv32i8( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv32i8( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv16i32(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv16i32(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv16i32( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv16i32( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv16i32( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv16i32( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg4.nxv2i64.nxv2i16(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv2i16(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv2i16( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv2i16( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv2i16( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2i64.nxv2i64(,,,, i64*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv2i64(,,,, i64*, , , i64) + +define void @test_vsxseg4_nxv2i64_nxv2i64( %val, i64* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2i64_nxv2i64( %val, i64* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2i64.nxv2i64( %val, %val, %val, %val, i64* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv16i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv16i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv16i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.mask.nxv16f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv32i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv32i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv32i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv32i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv4i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv4i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv4i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv16i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv16i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv16i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 
killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv1i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv1i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv1i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv1i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv1i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv1i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv8i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv8i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv8i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv16f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv4i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv4i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv4i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv1i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv1i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv1i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv2i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv2i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; 
CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv2i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv8i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv8i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv8i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv4i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv4i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv4i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv64i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv64i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv64i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv64i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv4i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv4i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv4i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv8i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv8i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv8i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg2.nxv16f16.nxv1i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv1i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv1i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv2i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv2i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv2i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv8i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv8i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv8i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), 
v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv32i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv32i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv32i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv32i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv16i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv16i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv16i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv2i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv2i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv2i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv2i16: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv16f16.nxv2i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv2i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv16f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv16f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv16f16.nxv2i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv16f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv16f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv16f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv16i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv16i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv16i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv16i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv32i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv32i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv4f64.nxv32i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv32i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv4i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv4i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv4i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv16i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv16i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv16i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv16i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv1i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv1i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed 
$v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv1i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv1i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv1i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv1i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv8i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv8i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv8i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv8i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv4i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv4i8(,, double*, , , 
i64) + +define void @test_vsxseg2_nxv4f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv4i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv1i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv1i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv1i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv2i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv2i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv2i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv2i32( %val, 
%val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv8i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv8i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv8i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv8i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv4i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv4i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv4i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv64i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv64i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv64i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 
killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv64i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv4i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv4i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv4i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv8i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv8i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv8i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv8i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv1i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv1i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv1i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define 
void @test_vsxseg2_mask_nxv4f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv2i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv2i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv2i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv8i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv8i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv8i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv8i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv32i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv32i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu 
+; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv32i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv32i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv16i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv16i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv16i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv16i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv2i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv2i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv2i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f64.nxv2i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv2i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv4f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg2_nxv4f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f64.nxv2i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv16i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv16i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv16i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv16i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv32i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv32i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv32i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv32i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv4i32(,, double*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv1f64.nxv4i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv4i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv16i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv16i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv16i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv16i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv1i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv1i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv1i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv1i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv1i32(,, double*, , , 
i64) + +define void @test_vsxseg2_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv1i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv8i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv8i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv8i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv8i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv4i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv4i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv4i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv1i16(,, double*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv1f64.nxv1i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv1i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv2i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv2i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv2i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv8i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv8i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv8i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv8i8( %val, %val, double* 
%base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv4i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv4i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv4i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv64i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv64i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv64i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv64i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv4i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv4i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv4i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv8i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv8i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv8i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv8i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv1i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv1i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv1i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv2i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv2i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv2i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def 
$v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv8i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv8i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv8i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv8i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv32i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv32i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv32i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv32i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv16i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv16i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv16i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; 
CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv16i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv2i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv2i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv2i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f64.nxv2i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv2i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv1f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f64.nxv2i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv16i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv16i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv32i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv32i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv32i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv32i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv4i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv4i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv16i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv16i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg3_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv1i64(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i64(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv1i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv8i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv8i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv4i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv4i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv1i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv2i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv2i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: 
vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv8i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv8i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv4i64(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i64(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv4i64( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv64i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv64i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv64i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv64i8: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv64i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv4i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv4i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv4i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv8i64(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i64(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv8i64( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv1i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv1i8( 
%val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv2i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv2i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv8i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv8i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv8i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv32i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv32i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv32i8( %val, %val, 
%val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv32i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv16i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv16i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv16i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv2i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv2i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f64.nxv2i64(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i64(,,, double*, , , i64) + +define void @test_vsxseg3_nxv1f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: 
vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f64.nxv2i64( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv16i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv16i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv16i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv32i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv32i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv32i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg4.nxv1f64.nxv4i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv4i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv4i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv16i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv16i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv16i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv1i64(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv1i64(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv1i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv1i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv8i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv8i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv8i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv4i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv4i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv4i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, 
a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv1i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv1i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv2i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv2i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv8i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv8i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv8i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv4i64(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv4i64(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv4i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv4i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv64i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv64i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv64i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv4i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv4i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: 
vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv4i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv8i64(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv8i64(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv8i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv8i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv1i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv1i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv2i8(,,,, double*, , i64) +declare void 
@llvm.riscv.vsxseg4.mask.nxv1f64.nxv2i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv8i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv8i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv8i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv32i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv32i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv32i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v 
v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv16i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv16i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv16i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv2i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv2i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f64.nxv2i64(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv2i64(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv1f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + 
+define void @test_vsxseg4_mask_nxv1f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv16i16(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv16i16(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv32i16(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv32i16(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv4i32(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv4i32(,,,,, 
double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv16i8(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv16i8(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv1i64(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv1i64(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: 
vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv1i32(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv1i32(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv8i16(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv8i16(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv4i8(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv4i8(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv4i8( 
%val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv1i16(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv1i16(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv2i32(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv2i32(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv8i8(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv8i8(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v 
v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv4i64(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv4i64(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv4i64( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv4i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv64i8(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv64i8(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv4i16(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv4i16(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv8i64(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv8i64(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv8i64( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv8i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv1i8(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv1i8(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv2i8(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv2i8(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv8i32(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv8i32(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv32i8(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv32i8(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg5_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv16i32(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv16i32(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv2i16(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv2i16(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 
+; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f64.nxv2i64(,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv2i64(,,,,, double*, , , i64) + +define void @test_vsxseg5_nxv1f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f64.nxv2i64( %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f64.nxv2i64( %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv16i16(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv16i16(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv32i16(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv32i16(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v 
v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv4i32(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv4i32(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv16i8(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv16i8(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v 
v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv1i64(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv1i64(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv1i32(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv1i32(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv8i16(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv8i16(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv4i8(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv4i8(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv1i16(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv1i16(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, 
double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv2i32(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv2i32(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv8i8(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv8i8(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv4i64(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv4i64(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv4i64( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg6_mask_nxv1f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv4i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv64i8(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv64i8(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv4i16(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv4i16(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, 
%val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv8i64(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv8i64(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv8i64( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv8i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv1i8(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv1i8(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv2i8(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv2i8(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv8i32(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv8i32(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv32i8(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv32i8(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, 
%val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv16i32(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv16i32(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv2i16(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv2i16(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f64.nxv2i64(,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv2i64(,,,,,, double*, , , i64) + +define void @test_vsxseg6_nxv1f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: 
vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f64.nxv2i64( %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f64.nxv2i64( %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv16i16(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv16i16(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv32i16(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv32i16(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, 
v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv4i32(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv4i32(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv16i8(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv16i8(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv1i64(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv1i64(,,,,,,, double*, 
, , i64) + +define void @test_vsxseg7_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv1i32(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv1i32(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv8i16(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv8i16(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg7_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv4i8(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv4i8(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv1i16(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv1i16(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg7.nxv1f64.nxv2i32(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv2i32(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv8i8(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv8i8(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv4i64(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv4i64(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg7.nxv1f64.nxv4i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv4i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv64i8(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv64i8(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv4i16(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv4i16(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv8i64(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv8i64(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv8i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv8i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv1i8(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv1i8(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + 
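; A short descriptive note on the CHECK pattern repeated throughout these
; vsxseg tests (a summary of what the expected output above shows, with the
; calling-convention rationale hedged as an inference rather than taken from
; the patch itself):
; - Each test passes the same nxv1f64 %val for every field of the segment, so
;   the expected code first materializes the pseudo's register-tuple operand
;   with a chain of vmv1r.v copies into consecutive registers (v0..v{NF-1}
;   for unmasked tests, v1..v{NF} for masked tests, since v0 carries the
;   mask), then switches vtype with vsetvli and issues vsxseg{NF}ei{EEW}.v.
; - For index types that occupy a full m8 register group (nxv64i8, nxv32i16,
;   nxv16i32, nxv8i64) the index vector appears to be passed indirectly: the
;   expected code reloads it with vle{EEW}.v from a1 under a temporary
;   vsetvli before the store, and the "# kill" comment marks v16 as now
;   defining the whole store tuple for liveness purposes.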
+declare void @llvm.riscv.vsxseg7.nxv1f64.nxv2i8(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv2i8(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv8i32(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv8i32(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv32i8(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv32i8(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg7.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv16i32(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv16i32(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv2i16(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv2i16(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f64.nxv2i64(,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv2i64(,,,,,,, double*, , , i64) + +define void @test_vsxseg7_nxv1f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f64.nxv2i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f64.nxv2i64( %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv16i16(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv16i16(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv32i16(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv32i16(,,,,,,,, double*, , , i64) + +define 
void @test_vsxseg8_nxv1f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv4i32(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv4i32(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv16i8(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv16i8(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv1i64(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv1i64(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv1i32(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv1i32(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv1i32( %val, double* %base, 
%index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv8i16(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv8i16(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv4i8(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv4i8(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv4i8( 
%val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv1i16(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv1i16(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv2i32(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv2i32(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv8i8(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv8i8(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv4i64(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv4i64(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv64i8(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv64i8(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv4i16(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv4i16(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv8i64(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv8i64(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: 
def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv1i8(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv1i8(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv2i8(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv2i8(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.mask.nxv1f64.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv8i32(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv8i32(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv32i8(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv32i8(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv16i32(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv16i32(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def 
$v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv2i16(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv2i16(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f64.nxv2i64(,,,,,,,, double*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv2i64(,,,,,,,, double*, , , i64) + +define void @test_vsxseg8_nxv1f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f64.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f64.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv16i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv16i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv16i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv16i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv32i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv32i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv32i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv32i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv4i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv4i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv4i32( %val, 
float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv4i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv16i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv16i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv16i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv16i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv1i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv1i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv1i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv1i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv1i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg2_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv1i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv8i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv8i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv8i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv4i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv4i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv4i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv1i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv1i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg2_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv1i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv2i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv2i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv2i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv8i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv8i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv8i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv4i64(,, float*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv2f32.nxv4i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv4i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv64i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv64i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv64i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv64i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv4i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv4i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv4i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare 
void @llvm.riscv.vsxseg2.nxv2f32.nxv8i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv8i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv8i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv1i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv1i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv1i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv2i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv2i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv2i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv8i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv8i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv8i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv32i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv32i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv32i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv32i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv16i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv16i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv16i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv16i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv2i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv2i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv2i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f32.nxv2i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv2i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f32.nxv2i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv16i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv16i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv16i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t 
+; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv16i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv32i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv32i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv32i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv32i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv4i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv4i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv4i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv16i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv16i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv16i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv16i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv1i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv1i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv1i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv1i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv1i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv1i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv8i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv8i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv8i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv8i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv4i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv4i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv4i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv1i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv1i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv1i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv2i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv2i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, 
i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv8i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv8i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv8i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv8i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv4i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv4i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv4i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv64i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv64i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv64i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: 
vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv64i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv4i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv4i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv4i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv8i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv8i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv8i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv8i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv1i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv1i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv1i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, 
v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv2i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv2i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv8i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv8i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv8i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv8i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv32i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv32i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv32i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, 
v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv32i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv16i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv16i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv16i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv16i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv2i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv2i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f32.nxv2i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv2i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv16i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv16i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv32i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv32i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv4i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv4i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv16i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv16i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv1i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv1i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv1i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv1i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv8i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv8i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv4i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv4i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv1i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv1i16(,,,, float*, , , i64) + +define 
void @test_vsxseg4_nxv2f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv2i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv2i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv8i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv8i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv4i64(,,,, float*, , 
i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv4i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv64i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv64i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv4i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv4i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 
+; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv8i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv8i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv8i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv8i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv1i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv1i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv2i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv2i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail 
call void @llvm.riscv.vsxseg4.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv8i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv8i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv32i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv32i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv16i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv16i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv16i32: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv2i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv2i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f32.nxv2i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv2i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, 
float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv16i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv16i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv32i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv32i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv4i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv4i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv4i32( %val, %val, %val, %val, 
%val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv16i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv16i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv1i64(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv1i64(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv1i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv1i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv8i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv8i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv4i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv4i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg5.nxv2f32.nxv1i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv1i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv2i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv2i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv8i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv8i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v 
v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv4i64(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv4i64(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv4i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv4i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv64i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv64i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv4i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv4i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 
+; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv8i64(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv8i64(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv8i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv8i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv1i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv1i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv2i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv2i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv8i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv8i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f32.nxv32i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv32i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv2f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv32i8: 
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv32i8(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 32 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg5.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, i64)
+declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv16i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i32>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg5_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv16i32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg5_mask_nxv2f32_nxv16i32(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv16i32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 16 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg5.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, i64)
+declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg5_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv2i16(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg5.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i64>, i64)
+declare void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg5_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.nxv2f32.nxv2i64(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg5_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg5_mask_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg5.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv16i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg6_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv16i16(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg6_mask_nxv2f32_nxv16i16(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv16i16(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 16 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv32i16(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 32 x i16>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg6_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv32i16(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg6_mask_nxv2f32_nxv32i16(<vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv32i16(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv4i32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 4 x i32>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg6_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv4i32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg6_mask_nxv2f32_nxv4i32(<vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv4i32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv16i8(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 16 x i8>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg6_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vmv1r.v v1, v0
+; CHECK-NEXT: vmv1r.v v2, v0
+; CHECK-NEXT: vmv1r.v v3, v0
+; CHECK-NEXT: vmv1r.v v4, v0
+; CHECK-NEXT: vmv1r.v v5, v0
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv16i8(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg6_mask_nxv2f32_nxv16i8(<vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v1, v16
+; CHECK-NEXT: vmv1r.v v2, v1
+; CHECK-NEXT: vmv1r.v v3, v1
+; CHECK-NEXT: vmv1r.v v4, v1
+; CHECK-NEXT: vmv1r.v v5, v1
+; CHECK-NEXT: vmv1r.v v6, v1
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv16i8(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg6.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, i64)
+declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv1i64(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 1 x i64>, <vscale x 2 x i1>, i64)
+
+define void @test_vsxseg6_nxv2f32_nxv1i64(<vscale x 2 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+;
CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv1i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv1i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv8i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv8i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv4i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv4i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv1i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv1i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv2i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv2i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv8i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv8i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv4i64(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv4i64(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv4i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv4i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv64i8(,,,,,, 
float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv64i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv4i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv4i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv8i64(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv8i64(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; 
CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv8i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv8i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv1i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv1i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv2i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv2i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv8i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv8i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv32i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv32i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv16i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv16i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, 
e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv2i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv2i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f32.nxv2i64(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv2i64(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv16i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv16i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv32i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv32i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv4i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv4i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv4i32( %val, float* %base, %index, 
i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv16i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv16i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv1i64(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv1i64(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv1i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv1i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv8i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv8i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv4i8(,,,,,,, float*, , i64) +declare void 
@llvm.riscv.vsxseg7.mask.nxv2f32.nxv4i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv1i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv1i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv2i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv2i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret 
void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv8i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv8i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv4i64(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv4i64(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg7.nxv2f32.nxv64i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv64i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv4i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv4i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv8i64(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv8i64(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 
+; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv1i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv1i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv2i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv2i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv2i8( %val, 
float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv8i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv8i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv32i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv32i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv16i32(,,,,,,, float*, , i64) 
+declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv16i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv2i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv2i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f32.nxv2i64(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv2i64(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv16i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv16i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv32i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv32i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) 
{ +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv4i32(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv4i32(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv16i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv16i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: 
vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv1i64(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv1i64(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv1i32(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv1i32(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv8i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv8i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv8i16: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv4i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv4i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv1i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv1i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg8_mask_nxv2f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv2i32(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv2i32(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv8i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv8i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv4i64(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv4i64(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv64i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv64i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv4i16(,,,,,,,, float*, , i64) +declare void 
@llvm.riscv.vsxseg8.mask.nxv2f32.nxv4i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv8i64(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv8i64(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv1i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv1i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv2i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv2i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv8i32(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv8i32(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv8i32( 
%val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv32i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv32i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv16i32(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv16i32(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 
+; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv2i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv2i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f32.nxv2i64(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv2i64(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv2f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv16i16(,, half*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv1f16.nxv16i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv16i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv32i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv32i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv32i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv32i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv4i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv4i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv4i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv16i8(,, half*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv1f16.nxv16i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv16i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv1i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv1i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv1i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv1i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv1i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv1i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv8i16(,, half*, , i64) 
+declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv8i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i16>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v18
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1f16_nxv8i16(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv8i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv4i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i8>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1f16_nxv4i8(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv4i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i16>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei16.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv2i32(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i32>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1f16_nxv2i32(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei32.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv2i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv8i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 8 x i8>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v25
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1f16_nxv8i8(<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v25, v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv8i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv4i64(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 4 x i64>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg2_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei64.v v16, (a0), v20
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg2_mask_nxv1f16_nxv4i64(<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg2ei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv4i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg2.nxv1f16.nxv64i8(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 64 x i8>, i64)
+declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv64i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv64i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv64i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv4i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv4i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv4i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv8i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv8i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv8i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv1i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv1i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv1i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv2i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv2i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv2i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv8i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv8i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv8i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv32i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv32i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv32i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv32i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv16i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv16i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv16i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv2i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv2i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv2i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli 
a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f16.nxv2i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv2i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv1f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f16.nxv2i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv16i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv16i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv16i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv16i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv32i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv32i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv32i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; 
CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv32i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv4i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv4i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv4i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv16i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv16i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv16i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv16i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv1i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv1i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, 
v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv1i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv1i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv8i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv8i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv8i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv4i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv4i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv4i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv1f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv1i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv1i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv2i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv2i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv2i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv8i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv8i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv8i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv1f16.nxv4i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv4i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv4i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv64i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv64i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv64i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv64i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv4i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv4i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv4i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv8i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv8i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv8i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv1i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv1i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv2i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv2i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv2i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv8i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv8i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv8i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv32i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv32i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv32i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv32i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv16i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv16i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv16i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry 
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv16i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv2i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv2i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv2i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f16.nxv2i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv2i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv1f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f16.nxv2i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv16i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv16i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv32i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv32i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv4i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv4i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv16i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv16i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv1i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv1i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv1i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv1i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv8i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv8i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv4i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv4i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv1i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv1i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv2i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv2i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv2i32( %val, half* %base, %index, i64 %vl) { 
+; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv8i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv8i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv4i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv4i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv64i8(,,,, half*, , i64) 
+declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv64i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv4i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv4i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv8i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv8i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv1i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv1i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv2i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv2i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv8i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv8i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv8i32( %val, %val, %val, 
%val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv32i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv32i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv16i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv16i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv2i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv2i16(,,,, half*, , , i64) + +define 
void @test_vsxseg4_nxv1f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f16.nxv2i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv2i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv1f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv16i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv16i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + 
ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv32i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv32i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv4i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv4i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv16i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv16i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define 
void @test_vsxseg5_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv1i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv1i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv1i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv1i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv8i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv8i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; 
CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv4i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv4i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv1i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv1i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv2i32(,,,,, half*, , i64) +declare void 
@llvm.riscv.vsxseg5.mask.nxv1f16.nxv2i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv8i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv8i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv4i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv4i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; 
CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv64i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv64i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv4i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv4i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv8i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv8i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; 
CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv8i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv8i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv1i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv1i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv2i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv2i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 
%vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv8i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv8i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv32i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv32i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv16i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv16i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg5_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv2i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv2i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f16.nxv2i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv2i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv1f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv16i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv16i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; 
CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv32i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv32i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv4i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv4i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv16i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv16i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv1i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv1i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv1i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv1i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 
+; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv8i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv8i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv4i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv4i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg6.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv1i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv1i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv2i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv2i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv8i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv8i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv4i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv4i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv64i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv64i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv64i8( %val, %val, 
%val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv4i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv4i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv8i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv8i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv8i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv8i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv1i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv1i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv2i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv2i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv8i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv8i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, 
i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv32i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv32i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv16i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv16i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv2i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv2i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; 
CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f16.nxv2i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv2i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv1f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv16i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv16i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv16i16( %val, %val, %val, 
%val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv32i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv32i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv4i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv4i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv16i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv16i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv1i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv1i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv1i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv1i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, 
v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv8i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv8i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv4i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv4i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv1i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv1i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv2i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv2i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv8i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv8i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv4i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv4i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv64i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv64i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv64i8( %val, %val, %val, 
%val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv4i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv4i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv8i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv8i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv1i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv1i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, 
v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv2i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv2i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv8i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv8i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v 
v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv32i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv32i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f16.nxv16i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv16i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv1f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + 
+declare void @llvm.riscv.vsxseg7.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, i64)
+declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv2i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i16>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg7_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg7ei16.v v0, (a0), v17
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv2i16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg7_mask_nxv1f16_nxv2i16(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg7ei16.v v1, (a0), v17, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv2i16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg7.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, i64)
+declare void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv2i64(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 2 x i64>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg7_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg7_nxv1f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg7ei64.v v0, (a0), v18
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg7.nxv1f16.nxv2i64(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg7_mask_nxv1f16_nxv2i64(<vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg7_mask_nxv1f16_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg7ei64.v v1, (a0), v18, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg7.mask.nxv1f16.nxv2i64(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg8.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, i64)
+declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv16i16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 16 x i16>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg8_nxv1f16_nxv16i16(<vscale x 1 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsxseg8ei16.v v0, (a0), v20
+; CHECK-NEXT:    ret
+entry:
+  tail call
void @llvm.riscv.vsxseg8.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv32i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv32i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv4i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv4i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv16i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv16i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv1i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv1i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv1i64( %val, %val, %val, %val, %val, %val, 
%val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv1i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv1i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv8i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv8i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv4i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv4i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; 
CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv1i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv1i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv2i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv2i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv8i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv8i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv4i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv4i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg8.nxv1f16.nxv64i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv64i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv4i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv4i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv8i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv8i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg8_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv1i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv1i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv2i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv2i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v 
v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv8i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv8i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv32i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv32i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; 
CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv16i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv16i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv2i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv2i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: 
vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f16.nxv2i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv2i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv1f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv16i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv16i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv16i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv16i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv32i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv32i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv32i16( %val, %val, float* %base, %index, i64 %vl) 
+ ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv32i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv4i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv4i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv4i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv16i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv16i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv16i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv16i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv1i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv1i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv1i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv1i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv1i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv1i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv8i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv8i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv8i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv4i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv4i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv4i8( %val, %val, float* %base, %index, i64 %vl) + ret 
void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv1i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv1i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv1i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv2i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv2i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv2i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv8i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv8i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: 
ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv8i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv4i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv4i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv4i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv64i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv64i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv64i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv64i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv4i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv4i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, 
a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv4i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv8i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv8i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv8i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv1i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv1i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv1i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv2i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv2i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv2i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv8i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv8i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv8i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv32i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv32i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv32i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv32i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv16i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv16i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: 
vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv16i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv16i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv2i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv2i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv2i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv1f32.nxv2i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv2i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv1f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv1f32.nxv2i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv1f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv1f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv16i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv16i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv16i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv16i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv32i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv32i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv32i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv32i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv4i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv4i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv4i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv16i8(,,, 
float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv16i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv16i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv16i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv1i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv1i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv1i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv1i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv8i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv8i16(,,, float*, , , i64) + +define void 
@test_vsxseg3_nxv1f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv8i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv8i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv4i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv4i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv4i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv1i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv1i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv2i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv2i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv2i32: +; CHECK: 
# %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv2i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv8i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv8i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv8i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv8i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv4i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv4i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv4i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv64i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv64i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed 
$v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv64i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv64i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv4i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv4i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv4i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv8i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv8i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv8i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv8i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv1f32.nxv1i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv1i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv2i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv2i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv2i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv8i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv8i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv8i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv8i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv32i8(,,, float*, , i64) +declare 
void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv32i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv32i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv32i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv16i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv16i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv16i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv16i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv2i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv2i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv2i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv1f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv1f32.nxv2i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv2i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv1f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv1f32.nxv2i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv1f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv1f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv16i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv16i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv32i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv32i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # 
kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv4i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv4i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv16i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv16i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv1i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv1i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv1i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv1i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv8i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv8i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv4i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv4i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv1i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv1i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv2i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv2i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv8i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv8i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv4i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv4i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv64i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv64i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg4.nxv1f32.nxv4i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv4i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv8i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv8i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv8i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv8i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv1i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv1i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, 
v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv2i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv2i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv8i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv8i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv32i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv32i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg4_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv16i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv16i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv2i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv2i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv1f32.nxv2i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv2i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv1f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv1f32_nxv2i64: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv1f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv1f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv1f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv16i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv16i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv32i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv32i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, 
e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv4i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv4i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv16i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv16i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv1i64(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv1i64(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg5_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv1i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv1i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv8i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv8i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv4i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv4i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v 
v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv1i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv1i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv2i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv2i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv8i8(,,,,, float*, , i64) 
+declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv8i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv4i64(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv4i64(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv4i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv4i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv64i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv64i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 
def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv4i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv4i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv8i64(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv8i64(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv8i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv8i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv1i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv1i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv1i8( %val, float* %base, %index, i64 
%vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv2i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv2i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv8i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv8i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv8i32( 
%val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv32i8(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv32i8(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv16i32(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv16i32(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv2i16(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv2i16(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv2i16( 
%val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv1f32.nxv2i64(,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv2i64(,,,,, float*, , , i64) + +define void @test_vsxseg5_nxv1f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv1f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv1f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv1f32.nxv2i64( %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv16i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv16i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv32i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv32i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv32i16( %val, float* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv4i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv4i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv16i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv16i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg6_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv1i64(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv1i64(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv1i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv1i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv8i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv8i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv8i16( %val, 
float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv4i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv4i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv1i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv1i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v 
v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv2i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv2i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv8i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv8i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv4i64(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv4i64(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv4i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv4i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv64i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv64i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv4i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv4i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: 
vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv8i64(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv8i64(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv8i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv8i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv1i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv1i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv2i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv2i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv8i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv8i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv32i8(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv32i8(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv16i32(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv16i32(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv2i16(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv2i16(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv1f32.nxv2i64(,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv2i64(,,,,,, float*, , , i64) + +define void @test_vsxseg6_nxv1f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv1f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv1f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv1f32.nxv2i64( %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv16i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv16i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv32i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv32i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv32i16( %val, float* 
%base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv32i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv4i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv4i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv4i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv16i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv16i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv16i8( %val, %val, %val, %val, %val, 
%val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv1i64(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv1i64(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv1i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv1i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv1i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv8i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv8i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, 
(a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv4i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv4i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv1i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv1i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv2i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv2i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv8i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv8i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv4i64(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv4i64(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v 
v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv64i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv64i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv4i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv4i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv8i64(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv8i64(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv1i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv1i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv2i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv2i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv8i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv8i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv32i8(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv32i8(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: 
vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv16i32(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv16i32(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv2i16(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv2i16(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv2i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv1f32.nxv2i64(,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv2i64(,,,,,,, float*, , , i64) + +define void @test_vsxseg7_nxv1f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv1f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv1f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv1f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv16i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv16i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv32i16(,,,,,,,, float*, , i64) 
+declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv32i16(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 32 x i16>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg8_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei16.v v16, (a0), v8
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv32i16(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg8_mask_nxv1f32_nxv32i16(<vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv32i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vle16.v v8, (a1)
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv32i16(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 32 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg8.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, i64)
+declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv4i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 4 x i32>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg8_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei32.v v0, (a0), v18
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv4i32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg8_mask_nxv1f32_nxv4i32(<vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei32.v v1, (a0), v18, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv4i32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg8.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, i64)
+declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv16i8(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 16 x i8>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg8_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei8.v v0, (a0), v18
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv16i8(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg8_mask_nxv1f32_nxv16i8(<vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei8.v v1, (a0), v18, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv16i8(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 16 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg8.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, i64)
+declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i64>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg8_nxv1f32_nxv1i64(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei64.v v0, (a0), v17
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv1i64(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl)
+  ret void
+}
+
+define void @test_vsxseg8_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v1, v16
+; CHECK-NEXT:    vmv1r.v v2, v1
+; CHECK-NEXT:    vmv1r.v v3, v1
+; CHECK-NEXT:    vmv1r.v v4, v1
+; CHECK-NEXT:    vmv1r.v v5, v1
+; CHECK-NEXT:    vmv1r.v v6, v1
+; CHECK-NEXT:    vmv1r.v v7, v1
+; CHECK-NEXT:    vmv1r.v v8, v1
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei64.v v1, (a0), v17, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsxseg8.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, i64)
+declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i32>, <vscale x 1 x i1>, i64)
+
+define void @test_vsxseg8_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
+; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v0, v16
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    vmv1r.v v2, v0
+; CHECK-NEXT:    vmv1r.v v3, v0
+; CHECK-NEXT:    vmv1r.v v4, v0
+; CHECK-NEXT:    vmv1r.v v5, v0
+; CHECK-NEXT:    vmv1r.v v6, v0
+; CHECK-NEXT:    vmv1r.v v7, v0
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsxseg8ei32.v v0, (a0), v17
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv1i32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl)
+  ret void
+}
+
+define
void @test_vsxseg8_mask_nxv1f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv8i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv8i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv4i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv4i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv1i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv1i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv2i32(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv2i32(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv8i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv8i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v 
v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv4i64(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv4i64(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv64i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv64i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, 
%val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv4i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv4i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv8i64(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv8i64(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv1i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv1i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv2i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv2i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.mask.nxv1f32.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv8i32(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv8i32(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv32i8(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv32i8(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv16i32(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv16i32(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def 
$v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv2i16(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv2i16(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv1f32.nxv2i64(,,,,,,,, float*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv2i64(,,,,,,,, float*, , , i64) + +define void @test_vsxseg8_nxv1f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv1f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv1f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv1f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv1f32.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv16i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv16i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv16i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv32i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv32i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv32i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv32i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv4i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv4i32(,, half*, , , i64) + +define void 
@test_vsxseg2_nxv8f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv4i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv16i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv16i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv16i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv1i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv1i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv1i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) + 
ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv1i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv1i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv1i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv8i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv8i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv8i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv4i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv4i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv4i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: 
vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv1i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv1i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv1i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv2i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv2i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv2i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv8i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv8i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv8i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed 
$v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv4i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv4i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv4i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv64i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv64i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv64i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv64i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv4i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv4i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv4i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv8i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv8i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv8i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv1i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv1i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv1i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv2i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv2i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv2i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv8i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv8i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv8i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv32i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv32i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv32i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv32i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv16i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv16i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv8f16.nxv16i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv2i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv2i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv2i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f16.nxv2i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv2i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv8f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f16.nxv2i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv16i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv16i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; 
CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv16i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv16i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv32i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv32i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv32i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv32i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv4i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv4i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv4i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv16i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv16i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv16i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv16i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv1i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv1i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv1i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv1i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv1i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv1i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv8i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv8i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v 
v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv4i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv4i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv4i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv1i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv1i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv1i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv2i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv2i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.nxv8f16.nxv2i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv8i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv8i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv4i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv4i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv4i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv64i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv64i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.nxv8f16.nxv64i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv64i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv4i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv4i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv4i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv8i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv8i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv1i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv1i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv1i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv2i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv2i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv2i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv8i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv8i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv32i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv32i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: 
vsxseg3ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv32i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv32i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv16i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv16i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv16i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv16i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv2i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv2i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv2i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv8f16.nxv2i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv2i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv8f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; 
CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv8f16.nxv2i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv8f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv8f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv16i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv16i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv32i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv32i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg4.nxv8f16.nxv4i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv4i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv16i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv16i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv1i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv1i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv1i64( %val, %val, 
%val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv1i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv1i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv8i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv8i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv4i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv4i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv1i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv1i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv2i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv2i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv8i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv8i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, 
e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv4i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv4i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv64i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv64i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv4i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv4i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret 
void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv8i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv8i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv1i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv1i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv2i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv2i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv8i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv8i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv32i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv32i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv16i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv16i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg4_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv2i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv2i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv8f16.nxv2i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv8f16.nxv2i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv8f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv8f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv8f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv8f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.mask.nxv8f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv16i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv16i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv16i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv16i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv32i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv32i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv32i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv32i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv4i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv4i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv4i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # 
kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv16i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv16i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv16i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv16i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv1i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv1i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv1i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv1i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv1i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv1i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + 
+define void @test_vsxseg2_mask_nxv8f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv8i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv8i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv8i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv4i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv4i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv4i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv1i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv1i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; 
CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv1i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv2i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv2i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv2i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv8i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv8i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv8i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv4i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv4i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # 
kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv4i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv64i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv64i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv64i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv64i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv4i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv4i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv4i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv8i64(,, float*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv8f32.nxv8i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv8i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv1i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv1i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv1i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv2i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv2i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv2i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv8i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv8i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv8i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv32i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv32i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv32i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v28, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v28, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv32i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv16i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv16i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv16i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, 
zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv16i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv2i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv2i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv2i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv1r.v v25, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv8f32.nxv2i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv2i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv8f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv8f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv8f32.nxv2i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv8f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv8f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v16m4_v20m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv2r.v v26, v20 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv8f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv16i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv16i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv16i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv2f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv16i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv32i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv32i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv32i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv32i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv4i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv4i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv4i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv4i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv16i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv16i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli 
a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv16i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv16i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv1i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv1i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv1i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv1i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv1i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv1i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv1i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv1i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv8i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv8i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg2_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv8i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv8i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv4i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv4i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv4i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv4i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv1i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv1i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv1i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv1i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg2.nxv2f64.nxv2i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv2i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv2i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv2i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv8i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv8i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv8i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv8i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv4i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv4i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv4i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.mask.nxv2f64.nxv4i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv64i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv64i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv64i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv64i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv4i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv4i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv4i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv4i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv8i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv8i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv8i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv8i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv1i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv1i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv1i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv1i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv2i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv2i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv2i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv2i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv8i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv8i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv8i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv2f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv8i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv32i8(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv32i8(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv32i8( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv32i8( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv16i32(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv16i32(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv16i32( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv16i32( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv2i16(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv2i16(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg2.nxv2f64.nxv2i16( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv2i16( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f64.nxv2i64(,, double*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv2i64(,, double*, , , i64) + +define void @test_vsxseg2_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f64.nxv2i64( %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f64.nxv2i64( %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv16i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv16i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv16i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv16i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv16i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv32i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv32i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: 
vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv32i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv32i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv4i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv4i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv4i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv4i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv16i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv16i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv16i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv16i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv1i64(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv1i64(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v 
v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv1i64( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv1i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv1i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv1i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv1i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv1i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv8i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv8i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv8i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv8i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv4i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv4i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv4i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv4i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv1i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv1i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv1i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv1i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv2i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv2i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv2i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv8i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv8i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.nxv2f64.nxv8i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv8i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv4i64(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv4i64(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv4i64( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv4i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv64i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv64i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv64i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv64i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv4i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv4i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; 
CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv4i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv4i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv8i64(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv8i64(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv8i64( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv8i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv1i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv1i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv1i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv1i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv2i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv2i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg3_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv2i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv8i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv8i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv8i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv8i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv32i8(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv32i8(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv32i8( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv32i8( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv16i32(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv16i32(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def 
$v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv16i32( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv16i32( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv2i16(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv2i16(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv2i16( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f64.nxv2i64(,,, double*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv2i64(,,, double*, , , i64) + +define void @test_vsxseg3_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f64.nxv2i64( %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv16i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv16i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv16i16( %val, double* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv16i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv16i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv16i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv32i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv32i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv32i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv32i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv32i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv32i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv4i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv4i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv4i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv4i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv4i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, 
(a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv4i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv16i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv16i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv16i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv16i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv16i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv16i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv1i64(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv1i64(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv1i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv1i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv1i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv1i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv1i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv1i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv1i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: 
vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv1i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv8i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv8i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv8i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv8i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv8i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv8i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv4i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv4i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv4i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv4i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv4i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv4i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv1i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv1i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv1i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv1i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg4_mask_nxv2f64_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv1i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv2i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv2i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv2i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv2i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv2i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv8i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv8i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv8i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv8i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv8i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv8i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv4i64(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv4i64(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv4i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv4i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + 
+define void @test_vsxseg4_mask_nxv2f64_nxv4i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv4i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv64i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv64i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv64i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv64i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv64i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv64i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv4i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv4i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv4i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv4i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv4i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv4i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv8i64(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv8i64(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv8i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv8i64: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv8i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv8i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv8i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv1i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv1i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv1i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv1i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv1i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv2i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv2i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv2i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv2i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv2i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv2i8( %val, %val, 
%val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv8i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv8i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv8i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv8i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv8i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv8i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv32i8(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv32i8(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv32i8( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv32i8( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv32i8( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv32i8( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv16i32(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv16i32(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv16i32( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv16i32( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv16i32( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def 
$v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv16i32( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv2i16(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv2i16(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv2i16( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv2i16( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv2i16( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f64.nxv2i64(,,,, double*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv2i64(,,,, double*, , , i64) + +define void @test_vsxseg4_nxv2f64_nxv2i64( %val, double* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f64_nxv2i64( %val, double* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f64.nxv2i64( %val, %val, %val, %val, double* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv16i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv16i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv16i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv32i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv32i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv32i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv32i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv4i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv4i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv4i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv16i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv16i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv16i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg2_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv1i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv1i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv1i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv1i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv1i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv1i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv8i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv8i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv8i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define 
void @test_vsxseg2_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv4i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv4i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv4i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv1i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv1i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv1i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv2i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv2i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv2i32( %val, %val, half* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv8i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv8i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv8i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv4i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv4i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv4i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv64i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv64i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv64i8( %val, 
%val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv64i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv4i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv4i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv4i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv8i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv8i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv8i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv1i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv1i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, 
v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv1i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv2i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv2i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv2i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv8i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv8i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv8i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv32i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv32i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; 
CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv32i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv32i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv16i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv16i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv16i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv2i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv2i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv2i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f16.nxv2i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv2i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv4f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f16.nxv2i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv16i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv16i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv16i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv16i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv32i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv32i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv32i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv32i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv4i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv4i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv4i32( %val, half* 
%base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv16i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv16i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv16i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv16i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv1i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv1i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv1i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv1i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv1i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; 
CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv1i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv8i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv8i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv8i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv4i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv4i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv1i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv1i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv1i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv2i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv2i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv2i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv8i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv8i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv8i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv4i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv4i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv4i64( %val, %val, %val, half* %base, 
%index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv64i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv64i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv64i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv64i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv4i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv4i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv8i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv8i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; 
CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv8i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv1i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv1i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv1i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv2i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv2i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv2i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv8i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv8i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: 
vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv8i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv32i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv32i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv32i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv32i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv16i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv16i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv16i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv16i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv2i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv2i16(,,, half*, , , i64) + +define void 
@test_vsxseg3_nxv4f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv2i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f16.nxv2i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv2i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv4f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f16.nxv2i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv16i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv16i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv32i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv32i16(,,,, 
half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv4i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv4i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv16i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv16i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: 
vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv1i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv1i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv1i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv1i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv8i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv8i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; 
CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv4i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv4i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv1i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv1i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv2i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv2i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv8i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv8i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv4i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv4i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv64i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv64i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.nxv4f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv4i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv4i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv8i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv8i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv1i8(,,,, half*, , i64) +declare void 
@llvm.riscv.vsxseg4.mask.nxv4f16.nxv1i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv2i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv2i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv8i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv8i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv8i32( %val, %val, 
%val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv32i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv32i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv16i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv16i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv2i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv2i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv2i16( %val, half* %base, %index, 
%mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f16.nxv2i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv2i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv4f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv16i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv16i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv32i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv32i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v 
v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv4i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv4i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv16i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv16i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg5.nxv4f16.nxv1i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv1i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv1i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv1i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv8i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv8i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; 
CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv4i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv4i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv1i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv1i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv2i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv2i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + 
+define void @test_vsxseg5_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv8i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv8i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv4i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv4i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv64i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv64i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, 
e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv4i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv4i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv8i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv8i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv8i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; 
CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv8i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv1i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv1i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv2i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv2i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv8i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv8i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg5.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv32i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv32i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv16i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv16i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg5.nxv4f16.nxv2i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv2i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv4f16.nxv2i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv2i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv4f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv4f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv4f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv4f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv16i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv16i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: 
vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv32i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv32i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv4i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv4i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv16i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv16i8(,,,,,, half*, , , i64) + +define void 
@test_vsxseg6_nxv4f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv1i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv1i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv1i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv1i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, 
v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv8i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv8i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv4i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv4i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv1i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv1i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v 
v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv2i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv2i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv8i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv8i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, 
%index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv4i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv4i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv64i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv64i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv4i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv4i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; 
CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv8i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv8i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv8i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv8i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv1i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv1i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli 
a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv2i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv2i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv8i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv8i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv32i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv32i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: 
+ tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv16i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv16i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv2i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv2i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, 
(a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv4f16.nxv2i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv2i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv4f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv4f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv4f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv4f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv16i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv16i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv32i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv32i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, 
(a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv4i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv4i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv16i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv16i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv1i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv1i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv1i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv1i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv8i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv8i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv8i16( %val, half* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv4i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv4i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv1i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv1i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv2i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv2i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv8i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv8i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv4i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv4i64(,,,,,,, half*, , , 
i64) + +define void @test_vsxseg7_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv64i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv64i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv4i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv4i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; 
CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv8i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv8i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv1i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv1i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; 
CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv2i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv2i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv8i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv8i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv32i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv32i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; 
CHECK-LABEL: test_vsxseg7_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv16i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv16i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv2i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv2i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg7.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv4f16.nxv2i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv2i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv4f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv4f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv4f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv4f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv16i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv16i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv32i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv32i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv4i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv4i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv16i8(,,,,,,,, 
half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv16i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv1i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv1i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv1i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv1i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; 
CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv8i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv8i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv4i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv4i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; 
CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv1i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv1i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv2i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv2i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv8i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv8i8(,,,,,,,, half*, , , i64) + +define void 
@test_vsxseg8_nxv4f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv4i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv4i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv64i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv64i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli 
a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv4i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv4i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv8i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv8i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg8_mask_nxv4f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv1i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv1i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv2i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv2i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv8i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv8i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv32i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv32i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv16i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv16i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg8_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv2i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv2i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv4f16.nxv2i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv2i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv4f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; 
CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv4f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv4f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv4f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv4f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv16i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv16i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv16i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv16i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv32i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv32i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv32i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv32i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv4i32(,, half*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv2f16.nxv4i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv4i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv4i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv16i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv16i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv16i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv16i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv1i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv1i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv1i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv1i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv1i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv1i32(,, half*, , , i64) + +define void 
@test_vsxseg2_nxv2f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv1i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv1i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv8i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv8i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv8i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv8i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv4i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv4i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv4i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv4i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv1i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv1i16(,, half*, , , i64) + +define void 
@test_vsxseg2_nxv2f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv1i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv1i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv2i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv2i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv2i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv2i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv8i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv8i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv8i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv8i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv4i64(,, half*, 
, i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv4i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv4i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv4i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv64i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv64i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv64i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv64i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv4i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv4i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv4i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv4i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} 
+ +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv8i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv8i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv8i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv8i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv1i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv1i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv1i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv1i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv2i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv2i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv2i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv2i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv8i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv8i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv8i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv8i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv32i8(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv32i8(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv32i8( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv32i8( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv16i32(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv16i32(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv16i32( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: 
+ tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv16i32( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv2i16(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv2i16(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv2i16( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v16_v17 def $v16_v17 +; CHECK-NEXT: vmv1r.v v25, v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv2i16( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv2f16.nxv2i64(,, half*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv2i64(,, half*, , , i64) + +define void @test_vsxseg2_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv2f16.nxv2i64( %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv2f16.nxv2i64( %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv16i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv16i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv16i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv16i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv32i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv32i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv32i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv32i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv4i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv4i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv4i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv4i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv16i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv16i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv16i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv16i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv1i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv1i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv1i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv1i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv1i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv1i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv1i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv1i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv8i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv8i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv8i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.mask.nxv2f16.nxv8i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv4i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv4i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv4i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv4i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv1i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv1i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv1i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv1i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv2i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv2i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv2i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg3.nxv2f16.nxv8i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv8i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv8i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv8i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv4i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv4i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv4i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv4i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv64i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv64i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv64i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: 
+ tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv64i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv4i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv4i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv4i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv4i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv8i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv8i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv8i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv8i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv1i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv1i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv1i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv1i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv2i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv2i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv2i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv8i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv8i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv8i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv8i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv32i8(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv32i8(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv32i8( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv32i8( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv16i32(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv16i32(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv16i32( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv16i32( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv2i16(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv2i16(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv2i16( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv2f16.nxv2i64(,,, half*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv2i64(,,, half*, , , i64) + +define void @test_vsxseg3_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv2f16.nxv2i64( %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv16i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv16i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv32i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv32i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv4i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv4i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; 
CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv16i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv16i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv1i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv1i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv1i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv1i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv1i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v 
v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv8i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv8i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv4i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv4i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv1i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv1i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv2i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv2i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv8i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv8i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv4i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv4i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv4i64( %val, half* %base, %index, i64 %vl) { 
+; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv4i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv64i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv64i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv4i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv4i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), 
v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv8i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv8i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv8i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv1i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv1i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv2i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv2i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv2i8( %val, half* %base, 
%index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv8i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv8i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv32i8(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv32i8(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv16i32(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv16i32(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; 
CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv2i16(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv2i16(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv2f16.nxv2i64(,,,, half*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv2i64(,,,, half*, , , i64) + +define void @test_vsxseg4_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv16i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv16i16(,,,,, half*, , , i64) + 
+define void @test_vsxseg5_nxv2f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv32i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv32i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv4i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv4i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: 
# %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv16i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv16i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv1i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv1i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv1i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv1i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv1i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv8i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv8i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv4i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv4i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv1i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv1i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg5_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv2i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv2i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv8i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv8i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, 
half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv4i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv4i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv4i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv64i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv64i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv4i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv4i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, i64 
%vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv8i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv8i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv8i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv8i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv1i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv1i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv2i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv2i8(,,,,, 
half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv8i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv8i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv32i8(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv32i8(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei8.v v1, (a0), v20, v0.t 
+; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv16i32(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv16i32(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv2i16(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv2i16(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg5.nxv2f16.nxv2i64(,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv2i64(,,,,, half*, , , i64) + +define void @test_vsxseg5_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v0, (a0), v18 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg5_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg5_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg5ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg5.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv16i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv16i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv32i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv32i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v16, 
(a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv4i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv4i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv16i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv16i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv1i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv1i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg6.nxv2f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv1i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv1i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv1i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv8i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv8i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv4i8(,,,,,, half*, , i64) +declare void 
@llvm.riscv.vsxseg6.mask.nxv2f16.nxv4i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv1i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv1i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv2i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv2i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv8i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv8i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv4i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv4i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv4i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv64i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv64i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; 
CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv4i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv4i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv8i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv8i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv8i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv8i64( %val, half* %base, %index, %mask, i64 
%vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv8i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv1i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv1i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv2i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv2i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv8i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv8i32(,,,,,, half*, , , i64) + +define void 
@test_vsxseg6_nxv2f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv32i8(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv32i8(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv16i32(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv16i32(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg6_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv2i16(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv2i16(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg6.nxv2f16.nxv2i64(,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv2i64(,,,,,, half*, , , i64) + +define void @test_vsxseg6_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg6_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg6_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg6ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg6.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv16i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv16i16(,,,,,,, half*, , , i64) + +define void 
@test_vsxseg7_nxv2f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv32i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv32i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv4i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv4i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, 
e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv16i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv16i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv1i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv1i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv1i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v 
v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv1i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv1i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv8i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv8i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv4i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv4i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v 
v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv1i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv1i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv2i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv2i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; 
CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv8i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv8i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv4i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv4i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv64i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv64i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; 
CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv4i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv4i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv8i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv8i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv8i64( %val, %val, 
%val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv1i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv1i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv2i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv2i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v17, v0.t +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv8i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv8i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv32i8(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv32i8(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv16i32(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv16i32(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; 
CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv2i16(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv2i16(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg7.nxv2f16.nxv2i64(,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv2i64(,,,,,,, half*, , , i64) + +define void @test_vsxseg7_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg7_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg7_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsxseg7_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg7ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg7.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv16i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv16i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv16i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv16i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv16i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv32i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv32i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv32i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv32i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 
+; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv32i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv4i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv4i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv4i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv4i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv4i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv16i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv16i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv16i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv16i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv16i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv1i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv1i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv1i64( %val, half* %base, 
%index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv1i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv1i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv1i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv1i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv1i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv8i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv8i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv8i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, 
%val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv8i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv8i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv4i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv4i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv4i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv4i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv1i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv1i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv1i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv1i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: 
vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv1i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv2i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv2i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv2i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv2i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv8i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv8i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv8i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv8i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv4i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv4i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv4i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv4i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv4i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv64i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv64i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv64i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv64i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv64i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv4i16(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv4i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv4i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: 
vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv4i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv8i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv8i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv8i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv8i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv8i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv1i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv1i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv1i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define 
void @test_vsxseg8_mask_nxv2f16_nxv1i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv2i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv2i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv2i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv2i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv8i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv8i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv8i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv8i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg8.mask.nxv2f16.nxv8i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv32i8(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv32i8(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv32i8( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv32i8( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei8.v v1, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv32i8( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv16i32(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv16i32(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv16i32( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv16i32( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a2, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv16i32( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv2i16(,,,,,,,, half*, , i64) +declare void 
@llvm.riscv.vsxseg8.mask.nxv2f16.nxv2i16(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv2i16( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v0, (a0), v17 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv2i16( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei16.v v1, (a0), v17, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg8.nxv2f16.nxv2i64(,,,,,,,, half*, , i64) +declare void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv2i64(,,,,,,,, half*, , , i64) + +define void @test_vsxseg8_nxv2f16_nxv2i64( %val, half* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vmv1r.v v2, v0 +; CHECK-NEXT: vmv1r.v v3, v0 +; CHECK-NEXT: vmv1r.v v4, v0 +; CHECK-NEXT: vmv1r.v v5, v0 +; CHECK-NEXT: vmv1r.v v6, v0 +; CHECK-NEXT: vmv1r.v v7, v0 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg8_mask_nxv2f16_nxv2i64( %val, half* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg8_mask_nxv2f16_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v1, v16 +; CHECK-NEXT: vmv1r.v v2, v1 +; CHECK-NEXT: vmv1r.v v3, v1 +; CHECK-NEXT: vmv1r.v v4, v1 +; CHECK-NEXT: vmv1r.v v5, v1 +; CHECK-NEXT: vmv1r.v v6, v1 +; CHECK-NEXT: vmv1r.v v7, v1 +; CHECK-NEXT: vmv1r.v v8, v1 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsxseg8ei64.v v1, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg8.mask.nxv2f16.nxv2i64( %val, %val, %val, %val, %val, %val, %val, %val, half* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv16i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv16i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv16i16( %val, %val, float* %base, %index, i64 %vl) + ret 
void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv16i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv32i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv32i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv32i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv32i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv4i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv4i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv16i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv16i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: 
vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv16i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv16i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv1i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv1i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv1i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv1i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv1i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv1i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv1i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv1i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv8i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv8i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv8i16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv8i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv8i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv4i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv4i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv1i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv1i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv1i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv1i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv2i32(,, float*, , i64) +declare void 
@llvm.riscv.vsxseg2.mask.nxv4f32.nxv2i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv2i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv2i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv8i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv8i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv8i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv8i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv4i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv4i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + 
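For reference, a sketch of the fully spelled-out types behind one vsxseg2 declaration/call pair from the block above, as the name mangling implies (nxv4f32 is <vscale x 4 x float>, nxv4i32 is <vscale x 4 x i32>, and the mask is <vscale x 4 x i1>); the remaining tests follow the same operand layout of segment values, base pointer, index vector, optional mask, and vl, with only the element and index types varying per case:

; fully-typed form of the unmasked and masked vsxseg2 intrinsics for nxv4f32 values
; indexed by nxv4i32 offsets (operand order: values..., pointer, index, [mask], vl)
declare void @llvm.riscv.vsxseg2.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x float>, float*, <vscale x 4 x i32>, i64)
declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>, <vscale x 4 x float>, float*, <vscale x 4 x i32>, <vscale x 4 x i1>, i64)

; example call with every operand typed explicitly
tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv4i32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl)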
+declare void @llvm.riscv.vsxseg2.nxv4f32.nxv64i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv64i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv64i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv64i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv4i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv4i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv4i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv8i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv8i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv8i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def 
$v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv8i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv1i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv1i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv1i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv1i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv2i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv2i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv2i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv2i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv8i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv8i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv8i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv8i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv8i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv32i8(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv32i8(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv32i8( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei8.v v16, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv32i8( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv16i32(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv16i32(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv16i32( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv16i32( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv2i16(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv2i16(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv2i16( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv2i16( %val, float* %base, 
%index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv1r.v v25, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei16.v v16, (a0), v25, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv2i16( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg2.nxv4f32.nxv2i64(,, float*, , i64) +declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv2i64(,, float*, , , i64) + +define void @test_vsxseg2_nxv4f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_nxv4f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv2i64( %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg2_mask_nxv4f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg2_mask_nxv4f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v16m2_v18m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v26, v18 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg2ei64.v v16, (a0), v26, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv2i64( %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv16i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv16i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv16i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv16i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv32i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv32i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.nxv4f32.nxv32i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv32i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv4i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv4i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv4i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv16i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv16i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv16i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv16i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv1i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv1i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call 
void @llvm.riscv.vsxseg3.nxv4f32.nxv1i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv1i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv1i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv1i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv1i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv1i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv8i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv8i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv8i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv8i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv4i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv4i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg3_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv4i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv1i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv1i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv1i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv1i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv2i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv2i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv2i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv2i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv8i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv8i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv8i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv8i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv8i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv4i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv4i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv4i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv64i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv64i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv64i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv64i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv4i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv4i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void 
@test_vsxseg3_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv4i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv8i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv8i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv8i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv8i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv1i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv1i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv1i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv1i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv2i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv2i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.nxv4f32.nxv2i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv2i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv8i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv8i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv8i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv8i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv32i8(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv32i8(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv32i8( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv32i8( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv16i32(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv16i32(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg3.nxv4f32.nxv16i32( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv16i32( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv2i16(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv2i16(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv2i16( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv2i16( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg3.nxv4f32.nxv2i64(,,, float*, , i64) +declare void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv2i64(,,, float*, , , i64) + +define void @test_vsxseg3_nxv4f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_nxv4f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.nxv4f32.nxv2i64( %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg3_mask_nxv4f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg3_mask_nxv4f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg3ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg3.mask.nxv4f32.nxv2i64( %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv16i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv16i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv16i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v20 +; 
CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv16i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv16i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv32i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv32i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv32i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv32i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv32i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu +; CHECK-NEXT: vle16.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv32i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv4i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv4i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv4i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv4i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv4i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv16i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv16i8(,,,, float*, , , 
i64) + +define void @test_vsxseg4_nxv4f32_nxv16i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv16i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv16i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv1i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv1i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv1i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv1i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv1i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv1i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv1i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv1i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv1i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv1i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsxseg4.nxv4f32.nxv8i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv8i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv8i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv8i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv8i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv4i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv4i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv4i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv4i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv4i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv1i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv1i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv1i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv1i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv1i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv1i16( 
%val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv2i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv2i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv2i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv2i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv2i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv8i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv8i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv8i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv8i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv8i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv4i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv4i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv4i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv4i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v20, 
v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv4i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv64i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv64i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv64i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv64i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv64i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu +; CHECK-NEXT: vle8.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv64i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv4i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv4i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv4i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv4i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv4i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv8i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv8i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv8i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vsxseg4.nxv4f32.nxv8i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv8i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv8i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv8i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv1i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv1i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv1i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv1i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv1i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv2i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv2i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv2i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv2i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv2i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv8i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv8i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv8i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; 
CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv8i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv8i32( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv32i8(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv32i8(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv32i8( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v0, (a0), v20 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv32i8( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei8.v v2, (a0), v20, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv32i8( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv16i32(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv16i32(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv16i32( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv16i32( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv16i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu +; CHECK-NEXT: vle32.v v8, (a1) +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a2, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei32.v v16, (a0), v8, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv16i32( %val, %val, %val, %val, float* %base, %index, %mask, 
i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv2i16(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv2i16(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv2i16( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv2i16( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei16.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv2i16( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsxseg4.nxv4f32.nxv2i64(,,,, float*, , i64) +declare void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv2i64(,,,, float*, , , i64) + +define void @test_vsxseg4_nxv4f32_nxv2i64( %val, float* %base, %index, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_nxv4f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v0, v16 +; CHECK-NEXT: vmv2r.v v2, v0 +; CHECK-NEXT: vmv2r.v v4, v0 +; CHECK-NEXT: vmv2r.v v6, v0 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v0, (a0), v18 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.nxv4f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, i64 %vl) + ret void +} + +define void @test_vsxseg4_mask_nxv4f32_nxv2i64( %val, float* %base, %index, %mask, i64 %vl) { +; CHECK-LABEL: test_vsxseg4_mask_nxv4f32_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv2r.v v2, v16 +; CHECK-NEXT: vmv2r.v v4, v2 +; CHECK-NEXT: vmv2r.v v6, v2 +; CHECK-NEXT: vmv2r.v v8, v2 +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vsxseg4ei64.v v2, (a0), v18, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsxseg4.mask.nxv4f32.nxv2i64( %val, %val, %val, %val, float* %base, %index, %mask, i64 %vl) + ret void +} +
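
For readability, here is one case from the listing above written out with its scalable-vector operand types spelled in full. The flattened test lines show the argument slots empty, but the types follow from the intrinsic name mangling (`nxv4f32` = `<vscale x 4 x float>`, `nxv32i8` = `<vscale x 32 x i8>`, mask = `<vscale x 4 x i1>`), so this is a reconstructed sketch of that test rather than new material in the patch:

; Indexed segment store with 2 fields (vsxseg2): value type <vscale x 4 x float>,
; byte-width (ei8) index vector of type <vscale x 32 x i8>.
declare void @llvm.riscv.vsxseg2.nxv4f32.nxv32i8(<vscale x 4 x float>, <vscale x 4 x float>, float*, <vscale x 32 x i8>, i64)
; Masked form adds a <vscale x 4 x i1> mask (same element count as the value type) before VL.
declare void @llvm.riscv.vsxseg2.mask.nxv4f32.nxv32i8(<vscale x 4 x float>, <vscale x 4 x float>, float*, <vscale x 32 x i8>, <vscale x 4 x i1>, i64)

define void @test_vsxseg2_nxv4f32_nxv32i8(<vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl) {
entry:
  ; Both segment fields store %val; base pointer, index vector and VL are
  ; passed through and selected to the vsxseg2ei8.v pseudo checked above.
  tail call void @llvm.riscv.vsxseg2.nxv4f32.nxv32i8(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 32 x i8> %index, i64 %vl)
  ret void
}

The remaining tests in this file follow the same pattern, varying only the segment count (vsxseg2 through vsxseg8), the index element type, and the presence of the mask operand.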