diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -488,6 +488,25 @@
                      llvm_anyint_ty]),
                     [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For unit stride segment store
+  // Input: (value, pointer, vl)
+  class RISCVUSSegStore<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyint_ty]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For unit stride segment store with mask
+  // Input: (value, pointer, mask, vl)
+  class RISCVUSSegStoreMask<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 llvm_anyint_ty]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -590,6 +609,10 @@
     def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
   }
+  multiclass RISCVUSSegStore<int nf> {
+    def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -873,6 +896,7 @@
   foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
     defm vlseg # nf : RISCVUSSegLoad<nf>;
+    defm vsseg # nf : RISCVUSSegStore<nf>;
   }
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -57,6 +57,8 @@
   void selectVLSEG(SDNode *Node, unsigned IntNo);
   void selectVLSEGMask(SDNode *Node, unsigned IntNo);
+  void selectVSSEG(SDNode *Node, unsigned IntNo);
+  void selectVSSEGMask(SDNode *Node, unsigned IntNo);
 
 // Include the pieces autogenerated from the target description.
 #include "RISCVGenDAGISel.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -204,6 +204,50 @@
   CurDAG->RemoveDeadNode(Node);
 }
 
+void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumOperands() - 4;
+  EVT VT = Node->getOperand(2)->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+  SDValue Operands[] = {StoreVal,
+                        Node->getOperand(2 + NF), // Base pointer.
+                        Node->getOperand(3 + NF), // VL.
+                        SEW, Node->getOperand(0)}; // Chain
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+  SDNode *Store =
+      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
+  ReplaceNode(Node, Store);
+}
+
+void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo) {
+  SDLoc DL(Node);
+  unsigned NF = Node->getNumOperands() - 5;
+  EVT VT = Node->getOperand(2)->getValueType(0);
+  unsigned ScalarSize = VT.getScalarSizeInBits();
+  MVT XLenVT = Subtarget->getXLenVT();
+  RISCVVLMUL LMUL = getLMUL(VT);
+  SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
+  SDValue Operands[] = {StoreVal,
+                        Node->getOperand(2 + NF), // Base pointer.
+                        Node->getOperand(3 + NF), // Mask.
+                        Node->getOperand(4 + NF), // VL.
+                        SEW,
+                        Node->getOperand(0)}; // Chain
+  const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+      IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+  SDNode *Store =
+      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);
+  ReplaceNode(Node, Store);
+}
+
 void RISCVDAGToDAGISel::Select(SDNode *Node) {
   // If we have a custom node, we have already selected.
   if (Node->isMachineOpcode()) {
@@ -349,6 +393,32 @@
     }
     break;
   }
+  case ISD::INTRINSIC_VOID: {
+    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
+    switch (IntNo) {
+    case Intrinsic::riscv_vsseg2:
+    case Intrinsic::riscv_vsseg3:
+    case Intrinsic::riscv_vsseg4:
+    case Intrinsic::riscv_vsseg5:
+    case Intrinsic::riscv_vsseg6:
+    case Intrinsic::riscv_vsseg7:
+    case Intrinsic::riscv_vsseg8: {
+      selectVSSEG(Node, IntNo);
+      return;
+    }
+    case Intrinsic::riscv_vsseg2_mask:
+    case Intrinsic::riscv_vsseg3_mask:
+    case Intrinsic::riscv_vsseg4_mask:
+    case Intrinsic::riscv_vsseg5_mask:
+    case Intrinsic::riscv_vsseg6_mask:
+    case Intrinsic::riscv_vsseg7_mask:
+    case Intrinsic::riscv_vsseg8_mask: {
+      selectVSSEGMask(Node, IntNo);
+      return;
+    }
+    }
+    break;
+  }
   }
 
   // Select the default instruction.
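Note on the operand layout consumed by selectVSSEG above: the following is a minimal IR sketch, not part of the patch, and the function and value names are hypothetical. On the ISD::INTRINSIC_VOID node the operands are laid out as (chain, intrinsic id, value0 .. value[NF-1], base pointer, vl), which is why the code computes NF = getNumOperands() - 4 and reads the segment values starting at operand index 2.

; Sketch: a two-segment unit-stride store; the node has 6 operands, so NF = 6 - 4 = 2.
declare void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32*, i64)

define void @store_two_fields(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32* %p, i64 %vl) {
entry:
  call void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32* %p, i64 %vl)
  ret void
}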
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -410,7 +410,8 @@
 }
 
 class ToLowerCase<string Upper> {
-  string L = !subst("VLSEG", "vlseg", Upper);
+  string L = !subst("VLSEG", "vlseg",
+             !subst("VSSEG", "vsseg", Upper));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -912,6 +913,38 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoUSSegStoreNoMask<VReg RetClass, bits<7> EEW>:
+      Pseudo<(outs),
+             (ins RetClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSSegStoreMask<VReg RetClass, bits<7> EEW>:
+      Pseudo<(outs),
+             (ins RetClass:$rd, GPR:$rs1,
+                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -1435,6 +1468,21 @@
   }
 }
 
+multiclass VPseudoUSSegStore {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask<vreg, eew>;
+          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask<vreg, eew>;
+        }
+      }
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
@@ -2564,6 +2612,7 @@
 // 7.8. Vector Load/Store Segment Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVLSEG : VPseudoUSSegLoad;
+defm PseudoVSSEG : VPseudoUSSegStore;
 
 //===----------------------------------------------------------------------===//
 // Pseudo Instructions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsseg.ll
@@ -0,0 +1,4778 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:   -verify-machineinstrs < %s | FileCheck %s
+
+declare void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64)
+
+define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT: vmv4r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsseg2e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT: vmv4r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16> %val, <vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsseg2e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsseg2e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsseg3e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsseg3e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsseg4e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsseg4e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i64)
+
+define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsseg2e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsseg2e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i64)
+
+define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsseg3e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsseg3e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i64)
+
+define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsseg4e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsseg4e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, <vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg2_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg2e64.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg2e64.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, i64)
+declare void @llvm.riscv.vsseg3.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg3_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg3e64.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg3e64.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, i64)
+declare void @llvm.riscv.vsseg4.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg4_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg4e64.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg4e64.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, i64)
+declare void @llvm.riscv.vsseg5.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg5_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg5e64.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg5e64.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, i64)
+declare void @llvm.riscv.vsseg6.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg6_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg6e64.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg6e64.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, i64)
+declare void @llvm.riscv.vsseg7.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg7_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg7e64.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg7e64.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, i64)
+declare void @llvm.riscv.vsseg8.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg8_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg8e64.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsseg8e64.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, <vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg2e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg2e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg3e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg3e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg4e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg4e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg5e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg5e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg6e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg6e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg7e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg7e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg8e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsseg8e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, <vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i64)
+
+define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsseg2e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i64)
+
+define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsseg3e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> %val, <vscale x 8 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsseg3e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> %val, <vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i64)
+
+define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsseg4e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> %val, <vscale x 8 x i16> %val, <vscale x 8 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT: vmv2r.v v18, v16
+; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsseg4e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16> %val, <vscale x 8 x i16> %val, <vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg2e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg2e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg3e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg3e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg4e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg4e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg5e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg5e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg6e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg6e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg7e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg7e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, i64)
+declare void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg8e8.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsseg8e8.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, <vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg2e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg3e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg3e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg4e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg4e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg5e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg5e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg6e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg6e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg7e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg7e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, i64)
+declare void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg8e16.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsseg8e16.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, <vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg2e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg2e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg3e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg3e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg4e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg4e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg5e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg5e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg6e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg6e32.v v16, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, i64)
+declare void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: vmv1r.v v17, v16
+; CHECK-NEXT: vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16
+; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsseg7e32.v v16, (a0)
+; CHECK-NEXT: ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, <vscale x 2 x i32> %val, i32* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
CHECK-LABEL: test_vsseg7_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsseg7e32.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg8.nxv2i32(,,,,,,,, i32* , i64) +declare void @llvm.riscv.vsseg8.mask.nxv2i32(,,,,,,,, i32*, , i64) + +define void @test_vsseg8_nxv2i32( %val, i32* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsseg8e32.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %vl) + ret void +} + +define void @test_vsseg8_mask_nxv2i32( %val, i32* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg8_mask_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vsseg8e32.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.nxv2i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg2.nxv8i8(,, i8* , i64) +declare void @llvm.riscv.vsseg2.mask.nxv8i8(,, i8*, , i64) + +define void @test_vsseg2_nxv8i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg2e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.nxv8i8( %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg2_mask_nxv8i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg2_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg2e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.nxv8i8( %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg3.nxv8i8(,,, i8* , i64) +declare void @llvm.riscv.vsseg3.mask.nxv8i8(,,, i8*, , i64) + +define void @test_vsseg3_nxv8i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg3e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + 
tail call void @llvm.riscv.vsseg3.nxv8i8( %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg3_mask_nxv8i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg3_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg3e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.nxv8i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg4.nxv8i8(,,,, i8* , i64) +declare void @llvm.riscv.vsseg4.mask.nxv8i8(,,,, i8*, , i64) + +define void @test_vsseg4_nxv8i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg4e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.nxv8i8( %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg4_mask_nxv8i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg4_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg4e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.nxv8i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg5.nxv8i8(,,,,, i8* , i64) +declare void @llvm.riscv.vsseg5.mask.nxv8i8(,,,,, i8*, , i64) + +define void @test_vsseg5_nxv8i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg5e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.nxv8i8( %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg5_mask_nxv8i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg5_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg5e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.nxv8i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg6.nxv8i8(,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg6.mask.nxv8i8(,,,,,, i8*, , i64) + +define void @test_vsseg6_nxv8i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg6e8.v v16, (a0) +; CHECK-NEXT: ret 
+entry: + tail call void @llvm.riscv.vsseg6.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg6_mask_nxv8i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg6_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg6e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.nxv8i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg7.nxv8i8(,,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg7.mask.nxv8i8(,,,,,,, i8*, , i64) + +define void @test_vsseg7_nxv8i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg7e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg7_mask_nxv8i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg7_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg7e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.nxv8i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg8.nxv8i8(,,,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg8.mask.nxv8i8(,,,,,,,, i8*, , i64) + +define void @test_vsseg8_nxv8i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg8e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.nxv8i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg8_mask_nxv8i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg8_mask_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vsseg8e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.nxv8i8( %val, 
<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg2_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsseg2e64.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vsseg2e64.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg4e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.nxv4i16( %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg4_mask_nxv4i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg4_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg4e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.nxv4i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg5.nxv4i16(,,,,, i16* , i64) +declare void @llvm.riscv.vsseg5.mask.nxv4i16(,,,,, i16*, , i64) + +define void @test_vsseg5_nxv4i16( %val, i16* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg5e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.nxv4i16( %val, %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg5_mask_nxv4i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg5_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg5e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.nxv4i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg6.nxv4i16(,,,,,, i16* , i64) +declare void @llvm.riscv.vsseg6.mask.nxv4i16(,,,,,, i16*, , i64) + +define void @test_vsseg6_nxv4i16( %val, i16* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg6e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg6_mask_nxv4i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg6_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg6e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.nxv4i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg7.nxv4i16(,,,,,,, i16* , i64) +declare void @llvm.riscv.vsseg7.mask.nxv4i16(,,,,,,, i16*, , i64) + +define void @test_vsseg7_nxv4i16( %val, i16* 
%base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg7e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg7_mask_nxv4i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg7_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg7e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg8.nxv4i16(,,,,,,,, i16* , i64) +declare void @llvm.riscv.vsseg8.mask.nxv4i16(,,,,,,,, i16*, , i64) + +define void @test_vsseg8_nxv4i16( %val, i16* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg8e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg8_mask_nxv4i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg8_mask_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vsseg8e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.nxv4i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg2.nxv1i8(,, i8* , i64) +declare void @llvm.riscv.vsseg2.mask.nxv1i8(,, i8*, , i64) + +define void @test_vsseg2_nxv1i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsseg2e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.nxv1i8( %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg2_mask_nxv1i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg2_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf8,ta,mu
+; CHECK-NEXT:    vsseg2e8.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsseg3e8.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsseg3e8.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsseg4e8.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsseg4e8.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i64)
+declare void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsseg5e8.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vsseg5e8.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void 
@llvm.riscv.vsseg5.mask.nxv1i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg6.nxv1i8(,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg6.mask.nxv1i8(,,,,,, i8*, , i64) + +define void @test_vsseg6_nxv1i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsseg6e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg6_mask_nxv1i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg6_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsseg6e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.nxv1i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg7.nxv1i8(,,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg7.mask.nxv1i8(,,,,,,, i8*, , i64) + +define void @test_vsseg7_nxv1i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsseg7e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg7_mask_nxv1i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg7_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsseg7e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg8.nxv1i8(,,,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg8.mask.nxv1i8(,,,,,,,, i8*, , i64) + +define void @test_vsseg8_nxv1i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsseg8e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.nxv1i8( %val, 
%val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg8_mask_nxv1i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg8_mask_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vsseg8e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.nxv1i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg2.nxv2i8(,, i8* , i64) +declare void @llvm.riscv.vsseg2.mask.nxv2i8(,, i8*, , i64) + +define void @test_vsseg2_nxv2i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg2e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.nxv2i8( %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg2_mask_nxv2i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg2_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg2e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.nxv2i8( %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg3.nxv2i8(,,, i8* , i64) +declare void @llvm.riscv.vsseg3.mask.nxv2i8(,,, i8*, , i64) + +define void @test_vsseg3_nxv2i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg3e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.nxv2i8( %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg3_mask_nxv2i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg3_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg3e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.nxv2i8( %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg4.nxv2i8(,,,, i8* , i64) +declare void @llvm.riscv.vsseg4.mask.nxv2i8(,,,, i8*, , i64) + +define void @test_vsseg4_nxv2i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg4e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.nxv2i8( %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg4_mask_nxv2i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: 
test_vsseg4_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg4e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.nxv2i8( %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg5.nxv2i8(,,,,, i8* , i64) +declare void @llvm.riscv.vsseg5.mask.nxv2i8(,,,,, i8*, , i64) + +define void @test_vsseg5_nxv2i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg5e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.nxv2i8( %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg5_mask_nxv2i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg5_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg5e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.nxv2i8( %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg6.nxv2i8(,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg6.mask.nxv2i8(,,,,,, i8*, , i64) + +define void @test_vsseg6_nxv2i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg6e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg6_mask_nxv2i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg6_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg6e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.nxv2i8( %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg7.nxv2i8(,,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg7.mask.nxv2i8(,,,,,,, i8*, , i64) + +define void @test_vsseg7_nxv2i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, 
e8,mf4,ta,mu +; CHECK-NEXT: vsseg7e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg7_mask_nxv2i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg7_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg7e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg8.nxv2i8(,,,,,,,, i8* , i64) +declare void @llvm.riscv.vsseg8.mask.nxv2i8(,,,,,,,, i8*, , i64) + +define void @test_vsseg8_nxv2i8( %val, i8* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg8e8.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %vl) + ret void +} + +define void @test_vsseg8_mask_nxv2i8( %val, i8* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg8_mask_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vsseg8e8.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.nxv2i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg2.nxv8i32(,, i32* , i64) +declare void @llvm.riscv.vsseg2.mask.nxv8i32(,, i32*, , i64) + +define void @test_vsseg2_nxv8i32( %val, i32* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsseg2e32.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.nxv8i32( %val, %val, i32* %base, i64 %vl) + ret void +} + +define void @test_vsseg2_mask_nxv8i32( %val, i32* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg2_mask_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vsseg2e32.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.nxv8i32( %val, %val, i32* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg2.nxv32i8(,, i8* , i64) +declare void @llvm.riscv.vsseg2.mask.nxv32i8(,, i8*, , i64) + +define void @test_vsseg2_nxv32i8( %val, i8* %base, i64 %vl) { +; 
CHECK-LABEL: test_vsseg2_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsseg2e8.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vsseg2e8.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %vl)
+  ret void
+}
+
+define void 
@test_vsseg4_mask_nxv2i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg4_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg4e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.nxv2i16( %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg5.nxv2i16(,,,,, i16* , i64) +declare void @llvm.riscv.vsseg5.mask.nxv2i16(,,,,, i16*, , i64) + +define void @test_vsseg5_nxv2i16( %val, i16* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg5e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.nxv2i16( %val, %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg5_mask_nxv2i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg5_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg5e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.nxv2i16( %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg6.nxv2i16(,,,,,, i16* , i64) +declare void @llvm.riscv.vsseg6.mask.nxv2i16(,,,,,, i16*, , i64) + +define void @test_vsseg6_nxv2i16( %val, i16* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg6e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg6_mask_nxv2i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg6_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg6e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.nxv2i16( %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg7.nxv2i16(,,,,,,, i16* , i64) +declare void @llvm.riscv.vsseg7.mask.nxv2i16(,,,,,,, i16*, , i64) + +define void @test_vsseg7_nxv2i16( %val, i16* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 
+; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg7e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg7_mask_nxv2i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg7_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg7e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg8.nxv2i16(,,,,,,,, i16* , i64) +declare void @llvm.riscv.vsseg8.mask.nxv2i16(,,,,,,,, i16*, , i64) + +define void @test_vsseg8_nxv2i16( %val, i16* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg8_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg8e16.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, i64 %vl) + ret void +} + +define void @test_vsseg8_mask_nxv2i16( %val, i16* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg8_mask_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vsseg8e16.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.nxv2i16( %val, %val, %val, %val, %val, %val, %val, %val, i16* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg2.nxv2i64(,, i64* , i64) +declare void @llvm.riscv.vsseg2.mask.nxv2i64(,, i64*, , i64) + +define void @test_vsseg2_nxv2i64( %val, i64* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsseg2e64.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.nxv2i64( %val, %val, i64* %base, i64 %vl) + ret void +} + +define void @test_vsseg2_mask_nxv2i64( %val, i64* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg2_mask_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vsseg2e64.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.nxv2i64( %val, %val, i64* %base, %mask, i64 %vl) + ret void +} + +declare void 
@llvm.riscv.vsseg3.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg3_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg3e64.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg3e64.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg4_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg4e64.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg4e64.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i1>, i64)
+
+define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, double* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4f64:
+; CHECK:       # %bb.0: # 
%entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsseg2e64.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.nxv4f64( %val, %val, double* %base, i64 %vl) + ret void +} + +define void @test_vsseg2_mask_nxv4f64( %val, double* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg2_mask_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4 +; CHECK-NEXT: vmv4r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vsseg2e64.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.nxv4f64( %val, %val, double* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg2.nxv1f64(,, double* , i64) +declare void @llvm.riscv.vsseg2.mask.nxv1f64(,, double*, , i64) + +define void @test_vsseg2_nxv1f64( %val, double* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg2_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg2e64.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.nxv1f64( %val, %val, double* %base, i64 %vl) + ret void +} + +define void @test_vsseg2_mask_nxv1f64( %val, double* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg2_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg2e64.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.nxv1f64( %val, %val, double* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg3.nxv1f64(,,, double* , i64) +declare void @llvm.riscv.vsseg3.mask.nxv1f64(,,, double*, , i64) + +define void @test_vsseg3_nxv1f64( %val, double* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg3_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg3e64.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.nxv1f64( %val, %val, %val, double* %base, i64 %vl) + ret void +} + +define void @test_vsseg3_mask_nxv1f64( %val, double* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg3_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg3e64.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.nxv1f64( %val, %val, %val, double* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg4.nxv1f64(,,,, double* , i64) +declare void @llvm.riscv.vsseg4.mask.nxv1f64(,,,, double*, , i64) + +define void @test_vsseg4_nxv1f64( %val, double* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg4_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg4e64.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.nxv1f64( %val, %val, %val, %val, double* %base, i64 %vl) + ret void +} + +define void 
@test_vsseg4_mask_nxv1f64( %val, double* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg4_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg4e64.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.nxv1f64( %val, %val, %val, %val, double* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg5.nxv1f64(,,,,, double* , i64) +declare void @llvm.riscv.vsseg5.mask.nxv1f64(,,,,, double*, , i64) + +define void @test_vsseg5_nxv1f64( %val, double* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg5_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg5e64.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.nxv1f64( %val, %val, %val, %val, %val, double* %base, i64 %vl) + ret void +} + +define void @test_vsseg5_mask_nxv1f64( %val, double* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg5_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg5e64.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.nxv1f64( %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg6.nxv1f64(,,,,,, double* , i64) +declare void @llvm.riscv.vsseg6.mask.nxv1f64(,,,,,, double*, , i64) + +define void @test_vsseg6_nxv1f64( %val, double* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg6_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg6e64.v v16, (a0) +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, i64 %vl) + ret void +} + +define void @test_vsseg6_mask_nxv1f64( %val, double* %base, %mask, i64 %vl) { +; CHECK-LABEL: test_vsseg6_mask_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vsseg6e64.v v16, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.nxv1f64( %val, %val, %val, %val, %val, %val, double* %base, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vsseg7.nxv1f64(,,,,,,, double* , i64) +declare void @llvm.riscv.vsseg7.mask.nxv1f64(,,,,,,, double*, , i64) + +define void @test_vsseg7_nxv1f64( %val, double* %base, i64 %vl) { +; CHECK-LABEL: test_vsseg7_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: 
+define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsseg7e64.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double* , i64)
+declare void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsseg8e64.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vsseg8e64.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, <vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg2e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg2e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg3e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg3e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg4e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg4e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i64)
+declare void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg5e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg5e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i64)
+declare void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg6e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg6e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i64)
+declare void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg7e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg7e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float* , i64)
+declare void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg8e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vsseg8e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, <vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i64)
+declare void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg5e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg5e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i64)
+declare void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg6e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg6e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i64)
+declare void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg7e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg7e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half* , i64)
+declare void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg8e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vsseg8e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, <vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg2e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg2e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg3e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg3e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg4e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg4e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i64)
+declare void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg5e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg5e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i64)
+declare void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg6e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg6e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i64)
+declare void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg7e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg7e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float* , i64)
+declare void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg8e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vsseg8e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, <vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i64)
+
+define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i64)
+
+define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i64)
+
+define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, <vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half> %val, <vscale x 8 x half> %val, <vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i1>, i64)
+
+define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsseg2e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vsseg2e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float> %val, float* %base, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg2e64.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double> %val, double* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg2e64.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg3e64.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double> %val, <vscale x 2 x double> %val, double* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg3e64.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double> %val, <vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg4e64.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double> %val, <vscale x 2 x double> %val, <vscale x 2 x double> %val, double* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vsseg4e64.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double> %val, <vscale x 2 x double> %val, <vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i64)
+declare void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg5e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg5e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i64)
+declare void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg6e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg6e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i64)
+declare void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg7e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg7e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half* , i64)
+declare void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg8e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vsseg8e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, <vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg2e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg3e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg4e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg5.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i64)
+declare void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg5e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg5e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg6.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i64)
+declare void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg6e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg6e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg7.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i64)
+declare void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg7e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg7e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg8.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half* , i64)
+declare void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg8e16.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vsseg8e16.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, <vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg2.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float* , i64)
+declare void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsseg2e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsseg2e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg3.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float* , i64)
+declare void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsseg3e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsseg3e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vsseg4.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float* , i64)
+declare void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsseg4e32.v v16, (a0)
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, i64 %vl)
+  ret void
+}
+
+define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vsseg4e32.v v16, (a0), v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, <vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+