diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -501,6 +501,26 @@
                     llvm_anyint_ty]),
                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For stride segment store
+  // Input: (value, pointer, offset, vl)
+  class RISCVSSegStore<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyint_ty,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For stride segment store with mask
+  // Input: (value, pointer, offset, mask, vl)
+  class RISCVSSegStoreMask<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyint_ty,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -604,6 +624,10 @@
     def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
   }
+  multiclass RISCVSSegStore<int nf> {
+    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -885,6 +909,7 @@
     defm vlseg # nf : RISCVUSSegLoad<nf>;
     defm vlsseg # nf : RISCVSSegLoad<nf>;
     defm vsseg # nf : RISCVUSSegStore<nf>;
+    defm vssseg # nf : RISCVSSegStore<nf>;
   }
 
 } // TargetPrefix = "riscv"
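
Note: for nf = 2, the definitions above expand to IR declarations of the following shape; the unmasked form takes the two segment values, the base pointer, the byte offset (stride), and vl, and the masked form inserts the mask vector before vl. These signatures are copied from the test file at the end of this patch:

  declare void @llvm.riscv.vssseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, i64)
  declare void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
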
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -56,8 +56,8 @@
   void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsStride);
   void selectVLSEGMask(SDNode *Node, unsigned IntNo, bool IsStride);
-  void selectVSSEG(SDNode *Node, unsigned IntNo);
-  void selectVSSEGMask(SDNode *Node, unsigned IntNo);
+  void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStride);
+  void selectVSSEGMask(SDNode *Node, unsigned IntNo, bool IsStride);
 
 // Include the pieces autogenerated from the target description.
 #include "RISCVGenDAGISel.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -229,9 +229,11 @@
   CurDAG->RemoveDeadNode(Node);
 }
 
-void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStride) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumOperands() - 4;
+  if (IsStride)
+    NF--;
   EVT VT = Node->getOperand(2)->getValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
   MVT XLenVT = Subtarget->getXLenVT();
@@ -239,10 +241,17 @@
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
-  SDValue Operands[] = {StoreVal,
-                        Node->getOperand(2 + NF), // Base pointer.
-                        Node->getOperand(3 + NF), // VL.
-                        SEW, Node->getOperand(0)}; // Chain
+  SmallVector<SDValue, 6> Operands;
+  Operands.push_back(StoreVal);
+  Operands.push_back(Node->getOperand(2 + NF)); // Base pointer.
+  if (IsStride) {
+    Operands.push_back(Node->getOperand(3 + NF)); // Stride.
+    Operands.push_back(Node->getOperand(4 + NF)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(3 + NF)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Store =
@@ -250,9 +259,11 @@
   ReplaceNode(Node, Store);
 }
 
-void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo, bool IsStride) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumOperands() - 5;
+  if (IsStride)
+    NF--;
   EVT VT = Node->getOperand(2)->getValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
   MVT XLenVT = Subtarget->getXLenVT();
@@ -260,12 +271,19 @@
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
-  SDValue Operands[] = {StoreVal,
-                        Node->getOperand(2 + NF), // Base pointer.
-                        Node->getOperand(3 + NF), // Mask.
-                        Node->getOperand(4 + NF), // VL.
-                        SEW,
-                        Node->getOperand(0)}; // Chain
+  SmallVector<SDValue, 7> Operands;
+  Operands.push_back(StoreVal);
+  Operands.push_back(Node->getOperand(2 + NF)); // Base pointer.
+  if (IsStride) {
+    Operands.push_back(Node->getOperand(3 + NF)); // Stride.
+    Operands.push_back(Node->getOperand(4 + NF)); // Mask.
+    Operands.push_back(Node->getOperand(5 + NF)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(3 + NF)); // Mask.
+    Operands.push_back(Node->getOperand(4 + NF)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Store =
@@ -469,7 +487,7 @@
     case Intrinsic::riscv_vsseg6:
     case Intrinsic::riscv_vsseg7:
     case Intrinsic::riscv_vsseg8: {
-      selectVSSEG(Node, IntNo);
+      selectVSSEG(Node, IntNo, /*IsStride=*/false);
       return;
     }
     case Intrinsic::riscv_vsseg2_mask:
@@ -479,7 +497,27 @@
     case Intrinsic::riscv_vsseg6_mask:
     case Intrinsic::riscv_vsseg7_mask:
     case Intrinsic::riscv_vsseg8_mask: {
-      selectVSSEGMask(Node, IntNo);
+      selectVSSEGMask(Node, IntNo, /*IsStride=*/false);
+      return;
+    }
+    case Intrinsic::riscv_vssseg2:
+    case Intrinsic::riscv_vssseg3:
+    case Intrinsic::riscv_vssseg4:
+    case Intrinsic::riscv_vssseg5:
+    case Intrinsic::riscv_vssseg6:
+    case Intrinsic::riscv_vssseg7:
+    case Intrinsic::riscv_vssseg8: {
+      selectVSSEG(Node, IntNo, /*IsStride=*/true);
+      return;
+    }
+    case Intrinsic::riscv_vssseg2_mask:
+    case Intrinsic::riscv_vssseg3_mask:
+    case Intrinsic::riscv_vssseg4_mask:
+    case Intrinsic::riscv_vssseg5_mask:
+    case Intrinsic::riscv_vssseg6_mask:
+    case Intrinsic::riscv_vssseg7_mask:
+    case Intrinsic::riscv_vssseg8_mask: {
+      selectVSSEGMask(Node, IntNo, /*IsStride=*/true);
       return;
     }
     }
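
Note: a worked example of the NF arithmetic above: a masked vssseg3 node carries chain, intrinsic id, three segment values, base pointer, stride, mask, and vl, i.e. nine operands, so getNumOperands() - 5 yields 4 and the IsStride decrement brings NF back to 3. The push_back sequence then feeds the pseudo its operands in this order for both variants:

  // StoreVal (register tuple), base, [stride,] [mask,] VL, SEW, chain
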
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -412,7 +412,8 @@
 class ToLowerCase<string Upper> {
   string L = !subst("VLSEG", "vlseg",
              !subst("VLSSEG", "vlsseg",
-             !subst("VSSEG", "vsseg", Upper)));
+             !subst("VSSEG", "vsseg",
+             !subst("VSSSEG", "vssseg", Upper))));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -980,6 +981,38 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSSegStoreNoMask<VReg RetClass, bits<11> EEW>:
+      Pseudo<(outs),
+             (ins RetClass:$rd, GPR:$rs1, GPR:$offset, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSSegStoreMask<VReg RetClass, bits<11> EEW>:
+      Pseudo<(outs),
+             (ins RetClass:$rd, GPR:$rs1, GPR:$offset,
+                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -1524,6 +1557,21 @@
   }
 }
 
+multiclass VPseudoSSegStore {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew>;
+          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew>;
+        }
+      }
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
@@ -2639,6 +2687,7 @@
 defm PseudoVLSEG : VPseudoUSSegLoad;
 defm PseudoVLSSEG : VPseudoSSegLoad;
 defm PseudoVSSEG : VPseudoUSSegStore;
+defm PseudoVSSSEG : VPseudoSSegStore;
 
 //===----------------------------------------------------------------------===//
 // Pseudo Instructions
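
Note: the naming follows the convention documented above for loads (PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2). For example, with nf = 2, eew = 32 and LInfo = M2, the defm PseudoVSSSEG above instantiates PseudoVSSSEG2E32_V_M2 and PseudoVSSSEG2E32_V_M2_MASK, which the new !subst("VSSSEG", "vssseg", ...) entry in ToLowerCase maps back to int_riscv_vssseg2 and int_riscv_vssseg2_mask.
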
@llvm.riscv.vssseg2.nxv4i32( %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg2_mask_nxv4i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,m2,ta,mu +; CHECK-NEXT: vssseg2e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.nxv4i32( %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg3.nxv4i32(,,, i32*, i64, i64) +declare void @llvm.riscv.vssseg3.mask.nxv4i32(,,, i32*, i64, , i64) + +define void @test_vssseg3_nxv4i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,m2,ta,mu +; CHECK-NEXT: vssseg3e32.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.nxv4i32( %val, %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg3_mask_nxv4i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg3_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,m2,ta,mu +; CHECK-NEXT: vssseg3e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.nxv4i32( %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg4.nxv4i32(,,,, i32*, i64, i64) +declare void @llvm.riscv.vssseg4.mask.nxv4i32(,,,, i32*, i64, , i64) + +define void @test_vssseg4_nxv4i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,m2,ta,mu +; CHECK-NEXT: vssseg4e32.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.nxv4i32( %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg4_mask_nxv4i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg4_mask_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,m2,ta,mu +; CHECK-NEXT: vssseg4e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.nxv4i32( %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg2.nxv16i8(,, i8*, i64, i64) +declare void @llvm.riscv.vssseg2.mask.nxv16i8(,, i8*, i64, , i64) + +define void @test_vssseg2_nxv16i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vssseg2e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vssseg2.nxv16i8( %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg2_mask_nxv16i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg2_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vssseg2e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.nxv16i8( %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg3.nxv16i8(,,, i8*, i64, i64) +declare void @llvm.riscv.vssseg3.mask.nxv16i8(,,, i8*, i64, , i64) + +define void @test_vssseg3_nxv16i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vssseg3e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.nxv16i8( %val, %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg3_mask_nxv16i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg3_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vssseg3e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.nxv16i8( %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg4.nxv16i8(,,,, i8*, i64, i64) +declare void @llvm.riscv.vssseg4.mask.nxv16i8(,,,, i8*, i64, , i64) + +define void @test_vssseg4_nxv16i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vssseg4e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.nxv16i8( %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg4_mask_nxv16i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg4_mask_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,m2,ta,mu +; CHECK-NEXT: vssseg4e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.nxv16i8( %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg2.nxv1i64(,, i64*, i64, i64) +declare void @llvm.riscv.vssseg2.mask.nxv1i64(,, i64*, i64, , i64) + +define void @test_vssseg2_nxv1i64( %val, i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg2e64.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.nxv1i64( %val, %val, i64* 
%base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg2_mask_nxv1i64( %val, i64* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg2e64.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.nxv1i64( %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg3.nxv1i64(,,, i64*, i64, i64) +declare void @llvm.riscv.vssseg3.mask.nxv1i64(,,, i64*, i64, , i64) + +define void @test_vssseg3_nxv1i64( %val, i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg3e64.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.nxv1i64( %val, %val, %val, i64* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg3_mask_nxv1i64( %val, i64* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg3e64.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.nxv1i64( %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg4.nxv1i64(,,,, i64*, i64, i64) +declare void @llvm.riscv.vssseg4.mask.nxv1i64(,,,, i64*, i64, , i64) + +define void @test_vssseg4_nxv1i64( %val, i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg4e64.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.nxv1i64( %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg4_mask_nxv1i64( %val, i64* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg4e64.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.nxv1i64( %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg5.nxv1i64(,,,,, i64*, i64, i64) +declare void @llvm.riscv.vssseg5.mask.nxv1i64(,,,,, i64*, i64, , i64) + +define void @test_vssseg5_nxv1i64( %val, i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg5e64.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void 
@llvm.riscv.vssseg5.nxv1i64( %val, %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg5_mask_nxv1i64( %val, i64* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg5e64.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.nxv1i64( %val, %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg6.nxv1i64(,,,,,, i64*, i64, i64) +declare void @llvm.riscv.vssseg6.mask.nxv1i64(,,,,,, i64*, i64, , i64) + +define void @test_vssseg6_nxv1i64( %val, i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg6e64.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg6_mask_nxv1i64( %val, i64* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg6e64.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.nxv1i64( %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg7.nxv1i64(,,,,,,, i64*, i64, i64) +declare void @llvm.riscv.vssseg7.mask.nxv1i64(,,,,,,, i64*, i64, , i64) + +define void @test_vssseg7_nxv1i64( %val, i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg7e64.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.nxv1i64( %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg7_mask_nxv1i64( %val, i64* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg7e64.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.nxv1i64( %val, 
%val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg8.nxv1i64(,,,,,,,, i64*, i64, i64) +declare void @llvm.riscv.vssseg8.mask.nxv1i64(,,,,,,,, i64*, i64, , i64) + +define void @test_vssseg8_nxv1i64( %val, i64* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg8e64.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg8_mask_nxv1i64( %val, i64* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg8_mask_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vssseg8e64.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.nxv1i64( %val, %val, %val, %val, %val, %val, %val, %val, i64* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg2.nxv1i32(,, i32*, i64, i64) +declare void @llvm.riscv.vssseg2.mask.nxv1i32(,, i32*, i64, , i64) + +define void @test_vssseg2_nxv1i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg2e32.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.nxv1i32( %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg2_mask_nxv1i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg2e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.nxv1i32( %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg3.nxv1i32(,,, i32*, i64, i64) +declare void @llvm.riscv.vssseg3.mask.nxv1i32(,,, i32*, i64, , i64) + +define void @test_vssseg3_nxv1i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg3e32.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.nxv1i32( %val, %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg3_mask_nxv1i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg3e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.nxv1i32( %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg4.nxv1i32(,,,, i32*, i64, i64) +declare void @llvm.riscv.vssseg4.mask.nxv1i32(,,,, i32*, i64, , i64) + +define void @test_vssseg4_nxv1i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg4e32.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.nxv1i32( %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg4_mask_nxv1i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg4e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.nxv1i32( %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg5.nxv1i32(,,,,, i32*, i64, i64) +declare void @llvm.riscv.vssseg5.mask.nxv1i32(,,,,, i32*, i64, , i64) + +define void @test_vssseg5_nxv1i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg5e32.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.nxv1i32( %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg5_mask_nxv1i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg5e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.nxv1i32( %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg6.nxv1i32(,,,,,, i32*, i64, i64) +declare void @llvm.riscv.vssseg6.mask.nxv1i32(,,,,,, i32*, i64, , i64) + +define void @test_vssseg6_nxv1i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg6e32.v v16, (a0), a1 
+; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg6_mask_nxv1i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg6_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg6e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.nxv1i32( %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg7.nxv1i32(,,,,,,, i32*, i64, i64) +declare void @llvm.riscv.vssseg7.mask.nxv1i32(,,,,,,, i32*, i64, , i64) + +define void @test_vssseg7_nxv1i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg7e32.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg7_mask_nxv1i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg7_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg7e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg8.nxv1i32(,,,,,,,, i32*, i64, i64) +declare void @llvm.riscv.vssseg8.mask.nxv1i32(,,,,,,,, i32*, i64, , i64) + +define void @test_vssseg8_nxv1i32( %val, i32* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg8e32.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg8_mask_nxv1i32( %val, i32* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg8_mask_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; 
CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a2, a2, e32,mf2,ta,mu +; CHECK-NEXT: vssseg8e32.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.nxv1i32( %val, %val, %val, %val, %val, %val, %val, %val, i32* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg2.nxv8i16(,, i16*, i64, i64) +declare void @llvm.riscv.vssseg2.mask.nxv8i16(,, i16*, i64, , i64) + +define void @test_vssseg2_nxv8i16( %val, i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vssseg2e16.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.nxv8i16( %val, %val, i16* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg2_mask_nxv8i16( %val, i16* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg2_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vssseg2e16.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.nxv8i16( %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg3.nxv8i16(,,, i16*, i64, i64) +declare void @llvm.riscv.vssseg3.mask.nxv8i16(,,, i16*, i64, , i64) + +define void @test_vssseg3_nxv8i16( %val, i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vssseg3e16.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.nxv8i16( %val, %val, %val, i16* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg3_mask_nxv8i16( %val, i16* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg3_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vssseg3e16.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.nxv8i16( %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg4.nxv8i16(,,,, i16*, i64, i64) +declare void @llvm.riscv.vssseg4.mask.nxv8i16(,,,, i16*, i64, , i64) + +define void @test_vssseg4_nxv8i16( %val, i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vssseg4e16.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.nxv8i16( %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg4_mask_nxv8i16( %val, i16* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg4_mask_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 def 
$v16m2_v18m2_v20m2_v22m2 +; CHECK-NEXT: vmv2r.v v18, v16 +; CHECK-NEXT: vmv2r.v v20, v16 +; CHECK-NEXT: vmv2r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu +; CHECK-NEXT: vssseg4e16.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.nxv8i16( %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg2.nxv4i8(,, i8*, i64, i64) +declare void @llvm.riscv.vssseg2.mask.nxv4i8(,, i8*, i64, , i64) + +define void @test_vssseg2_nxv4i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg2e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.nxv4i8( %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg2_mask_nxv4i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg2_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg2e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.nxv4i8( %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg3.nxv4i8(,,, i8*, i64, i64) +declare void @llvm.riscv.vssseg3.mask.nxv4i8(,,, i8*, i64, , i64) + +define void @test_vssseg3_nxv4i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg3e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.nxv4i8( %val, %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg3_mask_nxv4i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg3_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg3e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.nxv4i8( %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg4.nxv4i8(,,,, i8*, i64, i64) +declare void @llvm.riscv.vssseg4.mask.nxv4i8(,,,, i8*, i64, , i64) + +define void @test_vssseg4_nxv4i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg4e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.nxv4i8( %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg4_mask_nxv4i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg4_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, 
v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg4e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.nxv4i8( %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg5.nxv4i8(,,,,, i8*, i64, i64) +declare void @llvm.riscv.vssseg5.mask.nxv4i8(,,,,, i8*, i64, , i64) + +define void @test_vssseg5_nxv4i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg5e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.nxv4i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg5_mask_nxv4i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg5_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg5e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.mask.nxv4i8( %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg6.nxv4i8(,,,,,, i8*, i64, i64) +declare void @llvm.riscv.vssseg6.mask.nxv4i8(,,,,,, i8*, i64, , i64) + +define void @test_vssseg6_nxv4i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg6_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg6e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg6_mask_nxv4i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg6_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg6e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg6.mask.nxv4i8( %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg7.nxv4i8(,,,,,,, i8*, i64, i64) +declare void @llvm.riscv.vssseg7.mask.nxv4i8(,,,,,,, i8*, i64, , i64) + +define void @test_vssseg7_nxv4i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg7_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, 
e8,mf2,ta,mu +; CHECK-NEXT: vssseg7e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg7_mask_nxv4i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg7_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg7e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg7.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg8.nxv4i8(,,,,,,,, i8*, i64, i64) +declare void @llvm.riscv.vssseg8.mask.nxv4i8(,,,,,,,, i8*, i64, , i64) + +define void @test_vssseg8_nxv4i8( %val, i8* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg8e8.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg8_mask_nxv4i8( %val, i8* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg8_mask_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vmv1r.v v21, v16 +; CHECK-NEXT: vmv1r.v v22, v16 +; CHECK-NEXT: vmv1r.v v23, v16 +; CHECK-NEXT: vsetvli a2, a2, e8,mf2,ta,mu +; CHECK-NEXT: vssseg8e8.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg8.mask.nxv4i8( %val, %val, %val, %val, %val, %val, %val, %val, i8* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg2.nxv1i16(,, i16*, i64, i64) +declare void @llvm.riscv.vssseg2.mask.nxv1i16(,, i16*, i64, , i64) + +define void @test_vssseg2_nxv1i16( %val, i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg2_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vssseg2e16.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.nxv1i16( %val, %val, i16* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg2_mask_nxv1i16( %val, i16* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg2_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vssseg2e16.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg2.mask.nxv1i16( %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) + ret void 
+} + +declare void @llvm.riscv.vssseg3.nxv1i16(,,, i16*, i64, i64) +declare void @llvm.riscv.vssseg3.mask.nxv1i16(,,, i16*, i64, , i64) + +define void @test_vssseg3_nxv1i16( %val, i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg3_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vssseg3e16.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.nxv1i16( %val, %val, %val, i16* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg3_mask_nxv1i16( %val, i16* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg3_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vssseg3e16.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg3.mask.nxv1i16( %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg4.nxv1i16(,,,, i16*, i64, i64) +declare void @llvm.riscv.vssseg4.mask.nxv1i16(,,,, i16*, i64, , i64) + +define void @test_vssseg4_nxv1i16( %val, i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg4_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vssseg4e16.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.nxv1i16( %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg4_mask_nxv1i16( %val, i16* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg4_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vssseg4e16.v v16, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg4.mask.nxv1i16( %val, %val, %val, %val, i16* %base, i64 %offset, %mask, i64 %vl) + ret void +} + +declare void @llvm.riscv.vssseg5.nxv1i16(,,,,, i16*, i64, i64) +declare void @llvm.riscv.vssseg5.mask.nxv1i16(,,,,, i16*, i64, , i64) + +define void @test_vssseg5_nxv1i16( %val, i16* %base, i64 %offset, i64 %vl) { +; CHECK-LABEL: test_vssseg5_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: vssseg5e16.v v16, (a0), a1 +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vssseg5.nxv1i16( %val, %val, %val, %val, %val, i16* %base, i64 %offset, i64 %vl) + ret void +} + +define void @test_vssseg5_mask_nxv1i16( %val, i16* %base, i64 %offset, %mask, i64 %vl) { +; CHECK-LABEL: test_vssseg5_mask_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20 +; CHECK-NEXT: vmv1r.v v17, v16 +; CHECK-NEXT: vmv1r.v v18, v16 +; CHECK-NEXT: vmv1r.v v19, v16 +; CHECK-NEXT: vmv1r.v v20, v16 +; CHECK-NEXT: vsetvli a2, a2, e16,mf4,ta,mu +; CHECK-NEXT: 
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i64, <vscale x 32 x i1>, i64)
+
+define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
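For readers unfamiliar with the memory access pattern these tests exercise: a strided segment store vsssegNFeEEW.v writes, for each active element i, the NF fields (one per register of the group) contiguously to memory starting at base + i * stride, where the stride operand (a1 in the tests above) is a byte offset. The following scalar C++ sketch models the NF=2, SEW=32 case; the name vssseg2e32_model and its parameters are illustrative assumptions for exposition only, not part of this patch or of any RISC-V API.

#include <cstddef>
#include <cstdint>

// Scalar model of "vssseg2e32.v v16, (base), stride" (unmasked):
// field j of segment i is stored at base + i*stride + j*sizeof(int32_t).
static void vssseg2e32_model(int32_t *base, std::ptrdiff_t stride,
                             const int32_t *v16, const int32_t *v17,
                             std::size_t vl) {
  for (std::size_t i = 0; i < vl; ++i) {
    auto *seg = reinterpret_cast<int32_t *>(
        reinterpret_cast<char *>(base) + i * stride);
    seg[0] = v16[i]; // field 0, from the first register of the group
    seg[1] = v17[i]; // field 1, from the second register of the group
  }
}

The masked variants tested above behave the same way except that segments whose mask bit in v0 is clear are skipped entirely, which is why the only codegen difference in the CHECK lines is the trailing ", v0.t" operand.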