diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -95,6 +95,21 @@
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For strided load
+  // Input: (pointer, stride, vl)
+  class RISCVSLoad
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyint_ty, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For strided load with mask
+  // Input: (maskedoff, pointer, stride, mask, vl)
+  class RISCVSLoadMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>,
+                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
   // For unit stride store
   // Input: (vector_in, pointer, vl)
   class RISCVUSStore
@@ -112,6 +127,22 @@
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For strided store
+  // Input: (vector_in, pointer, stride, vl)
+  class RISCVSStore
+        : Intrinsic<[],
+                    [llvm_anyvector_ty,
+                     LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyint_ty, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For strided store with mask
+  // Input: (vector_in, pointer, stride, mask, vl)
+  class RISCVSStoreMask
+        : Intrinsic<[],
+                    [llvm_anyvector_ty,
+                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
   class RISCVBinaryAAXNoMask
@@ -182,10 +213,18 @@
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
   }
+  multiclass RISCVSLoad {
+    def "int_riscv_" # NAME : RISCVSLoad;
+    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
+  }
   multiclass RISCVUSStore {
     def "int_riscv_" # NAME : RISCVUSStore;
     def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
   }
+  multiclass RISCVSStore {
+    def "int_riscv_" # NAME : RISCVSStore;
+    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
+  }
 
   multiclass RISCVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
@@ -207,6 +246,8 @@
 
   defm vle : RISCVUSLoad;
   defm vse : RISCVUSStore;
+  defm vlse: RISCVSLoad;
+  defm vsse: RISCVSStore;
 
   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -310,6 +310,39 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSLoadNoMask<VReg RetClass>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSLoadMask<VReg RetClass>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+              (ins GetVRegNoV0<RetClass>.R:$merge,
+                   GPR:$rs1, GPR:$rs2,
+                   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 5;
+  let SEWIndex = 6;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
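
// NOTE: a minimal usage sketch of the strided-load intrinsic declared in
// IntrinsicsRISCV.td above. The operand order is (pointer, stride, vl); the
// stride is a byte stride, which becomes the rs2 operand of vlse<sew>.v.
// @strided_load is a hypothetical caller; the shape matches the vlse-rv32.ll
// tests below (rv32, so XLen operands are i32).
//
//   declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
//     <vscale x 2 x i32>*,
//     i32,
//     i32);
//
//   define <vscale x 2 x i32> @strided_load(<vscale x 2 x i32>* %p,
//                                           i32 %stride, i32 %vl) {
//     %v = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
//         <vscale x 2 x i32>* %p, i32 %stride, i32 %vl)
//     ret <vscale x 2 x i32> %v
//   }
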
 class VPseudoUSStoreNoMask<VReg StClass>:
       Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
@@ -339,6 +372,35 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSStoreNoMask<VReg StClass>:
+      Pseudo<(outs),
+             (ins StClass:$rd, GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSStoreMask<VReg StClass>:
+      Pseudo<(outs),
+             (ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryNoMask
@@ ... @@
 }
 
+multiclass VPseudoSLoad {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoSLoadNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg>;
+    }
+  }
+}
+
 multiclass VPseudoUSStore {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -426,6 +499,17 @@
     }
   }
 }
 
+multiclass VPseudoSStore {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoSStoreNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg>;
+    }
+  }
+}
+
 multiclass VPseudoBinary
@@ ... @@
 }
 
+multiclass VPatSLoad<string intrinsic,
+                     string inst,
+                     LLVMType type,
+                     LLVMType mask_type,
+                     int sew,
+                     LMULInfo vlmul,
+                     VReg reg_class>
+{
+    defvar Intr = !cast<Intrinsic>(intrinsic);
+    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
+    def : Pat<(type (Intr GPR:$rs1, GPR:$rs2, GPR:$vl)),
+              (Pseudo $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
+    def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
+                    GPR:$rs1, GPR:$rs2, (mask_type V0), GPR:$vl)),
+              (PseudoMask $merge,
+                          $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
 multiclass VPatUSStore
@@ ... @@
 }
 
+multiclass VPatSStore<string intrinsic,
+                      string inst,
+                      LLVMType type,
+                      LLVMType mask_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg reg_class>
+{
+    defvar Intr = !cast<Intrinsic>(intrinsic);
+    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
+    def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, GPR:$vl),
+              (Pseudo $rs3, $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
+    def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (mask_type V0), GPR:$vl),
+              (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
 multiclass VPatBinary
@@ ... @@
 }
 
+//===----------------------------------------------------------------------===//
+// 7.5 Vector Strided Instructions
+//===----------------------------------------------------------------------===//
+
+foreach vti = AllVectors in
+{
+  defm : VPatSLoad<"int_riscv_vlse",
+                   "PseudoVLSE" # vti.SEW,
+                   vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
+  defm : VPatSStore<"int_riscv_vsse",
+                    "PseudoVSSE" # vti.SEW,
+                    vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
+}
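
// NOTE: a minimal store-side sketch mirroring the load example above. The
// vsse intrinsic takes (vector, pointer, stride, vl), again with a byte
// stride (the rs2 operand of vsse<sew>.v). @strided_store is a hypothetical
// caller; the shape matches the vsse-rv32.ll tests below.
//
//   declare void @llvm.riscv.vsse.nxv2i32(
//     <vscale x 2 x i32>,
//     <vscale x 2 x i32>*,
//     i32,
//     i32);
//
//   define void @strided_store(<vscale x 2 x i32> %v, <vscale x 2 x i32>* %p,
//                              i32 %stride, i32 %vl) {
//     call void @llvm.riscv.vsse.nxv2i32(<vscale x 2 x i32> %v,
//         <vscale x 2 x i32>* %p, i32 %stride, i32 %vl)
//     ret void
//   }
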
Vector Integer Arithmetic Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll @@ -0,0 +1,1161 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vlse.nxv1i32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv1i32_nxv1i32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1i32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv2i32_nxv2i32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2i32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv4i32_nxv4i32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4i32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv8i32_nxv8i32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8i32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare 
@llvm.riscv.vlse.nxv16i32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv16i32_nxv16i32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16i32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv1f32_nxv1f32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1f32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv2f32_nxv2f32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2f32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2f32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv4f32_nxv4f32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4f32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8f32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv8f32_nxv8f32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8f32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, * %1, i32 
%2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16f32( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv16f32_nxv16f32(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16f32( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16f32( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv1i16_nxv1i16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1i16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv2i16_nxv2i16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2i16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv4i16_nxv4i16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4i16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv8i16_nxv8i16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16 +; CHECK: 
vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8i16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv16i16_nxv16i16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16i16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv32i16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv32i16_nxv32i16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv32i16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv32i16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv32i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv1f16_nxv1f16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1f16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv2f16_nxv2f16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2f16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2f16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t 
+ %a = call @llvm.riscv.vlse.mask.nxv2f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv4f16_nxv4f16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4f16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8f16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv8f16_nxv8f16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8f16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16f16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv16f16_nxv16f16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16f16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16f16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv32f16( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv32f16_nxv32f16(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv32f16( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv32f16( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv32f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i8( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv1i8_nxv1i8(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1i8( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vlse.mask.nxv1i8( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i8( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv2i8_nxv2i8(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2i8( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i8( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i8( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv4i8_nxv4i8(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4i8( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i8( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i8( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv8i8_nxv8i8(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8i8( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i8( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i8( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv16i8_nxv16i8(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16i8( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i8( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv32i8( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv32i8_nxv32i8(* %0, i32 %1, i32 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv32i8( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv32i8( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv32i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv64i8( + *, + i32, + i32); + +define @intrinsic_vlse_v_nxv64i8_nxv64i8(* %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv64i8( + * %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv64i8( + , + *, + i32, + , + i32); + +define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv64i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll @@ -0,0 +1,1481 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vlse.nxv1i64( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv1i64_nxv1i64(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1i64( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i64( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv1i64_nxv1i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1i64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i64( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv2i64_nxv2i64(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2i64( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i64( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv2i64_nxv2i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2i64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i64( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv4i64_nxv4i64(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1 + %a = call 
@llvm.riscv.vlse.nxv4i64( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i64( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv4i64_nxv4i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4i64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i64( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv8i64_nxv8i64(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8i64( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i64( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv8i64_nxv8i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8i64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f64( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv1f64_nxv1f64(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1f64( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f64( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv1f64_nxv1f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1f64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f64( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv2f64_nxv2f64(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2f64( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2f64( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv2f64_nxv2f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2f64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f64( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv4f64_nxv4f64(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4f64( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f64( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv4f64_nxv4f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4f64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vlse.nxv8f64( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv8f64_nxv8f64(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8f64( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f64( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv8f64_nxv8f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vlse64.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8f64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv1i32_nxv1i32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1i32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv1i32_nxv1i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv2i32_nxv2i32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2i32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv2i32_nxv2i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv4i32_nxv4i32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4i32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv4i32_nxv4i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv8i32_nxv8i32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8i32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv8i32_nxv8i32( %0, * %1, i64 %2, %3, i64 
%4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv16i32_nxv16i32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16i32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv16i32_nxv16i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv1f32_nxv1f32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1f32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv1f32_nxv1f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv2f32_nxv2f32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2f32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2f32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv2f32_nxv2f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv4f32_nxv4f32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4f32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv4f32_nxv4f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8f32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv8f32_nxv8f32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, 
a2, e32,m4,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8f32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv8f32_nxv8f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16f32( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv16f32_nxv16f32(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16f32( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16f32( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv16f32_nxv16f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vlse32.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv1i16_nxv1i16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1i16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv1i16_nxv1i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv2i16_nxv2i16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2i16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv2i16_nxv2i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv4i16_nxv4i16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4i16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv4i16_nxv4i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call 
@llvm.riscv.vlse.mask.nxv4i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv8i16_nxv8i16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8i16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv8i16_nxv8i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv16i16_nxv16i16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16i16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv16i16_nxv16i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv32i16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv32i16_nxv32i16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv32i16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv32i16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv32i16_nxv32i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv32i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1f16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv1f16_nxv1f16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1f16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1f16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv1f16_nxv1f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2f16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv2f16_nxv2f16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2f16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vlse.mask.nxv2f16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv2f16_nxv2f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4f16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv4f16_nxv4f16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4f16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4f16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv4f16_nxv4f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8f16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv8f16_nxv8f16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8f16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8f16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv8f16_nxv8f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16f16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv16f16_nxv16f16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16f16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16f16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv16f16_nxv16f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv32f16( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv32f16_nxv32f16(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv32f16( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv32f16( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv32f16_nxv32f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vlse16.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv32f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv1i8( + *, + i64, + i64); + +define 
@intrinsic_vlse_v_nxv1i8_nxv1i8(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv1i8( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv1i8( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv1i8_nxv1i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv1i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv2i8( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv2i8_nxv2i8(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv2i8( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv2i8( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv2i8_nxv2i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv2i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv4i8( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv4i8_nxv4i8(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv4i8( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv4i8( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv4i8_nxv4i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv4i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv8i8( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv8i8_nxv8i8(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv8i8( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv8i8( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv8i8_nxv8i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv8i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv16i8( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv16i8_nxv16i8(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv16i8( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv16i8( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv16i8_nxv16i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu 
+; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv16i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv32i8( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv32i8_nxv32i8(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv32i8( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv32i8( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv32i8_nxv32i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv32i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlse.nxv64i8( + *, + i64, + i64); + +define @intrinsic_vlse_v_nxv64i8_nxv64i8(* %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1 + %a = call @llvm.riscv.vlse.nxv64i8( + * %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlse.mask.nxv64i8( + , + *, + i64, + , + i64); + +define @intrinsic_vlse_mask_v_nxv64i8_nxv64i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t + %a = call @llvm.riscv.vlse.mask.nxv64i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll @@ -0,0 +1,1219 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare void @llvm.riscv.vsse.nxv1i32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv1i32_nxv1i32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1i32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1i32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv2i32_nxv2i32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2i32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void 
@llvm.riscv.vsse.mask.nxv2i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv4i32_nxv4i32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4i32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8i32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv8i32_nxv8i32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8i32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16i32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv16i32_nxv16i32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16i32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16i32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16i32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1f32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv1f32_nxv1f32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1f32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1f32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2f32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv2f32_nxv2f32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a2, 
e32,m1,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2f32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2f32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4f32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv4f32_nxv4f32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4f32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4f32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8f32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv8f32_nxv8f32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8f32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8f32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16f32( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv16f32_nxv16f32( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16f32( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16f32( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16f32( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1i16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv1i16_nxv1i16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1i16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1i16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv2i16_nxv2i16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2i16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv4i16_nxv4i16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4i16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8i16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv8i16_nxv8i16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8i16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16i16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv16i16_nxv16i16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16i16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16i16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv32i16( + , + *, + i32, + i32); + +define void 
@intrinsic_vsse_v_nxv32i16_nxv32i16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv32i16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv32i16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv32i16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1f16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv1f16_nxv1f16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1f16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1f16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2f16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv2f16_nxv2f16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2f16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2f16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4f16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv4f16_nxv4f16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4f16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4f16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8f16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv8f16_nxv8f16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8f16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void 
@llvm.riscv.vsse.mask.nxv8f16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16f16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv16f16_nxv16f16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16f16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16f16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv32f16( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv32f16_nxv32f16( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv32f16( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv32f16( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv32f16( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1i8( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv1i8_nxv1i8( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1i8( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1i8( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i8( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv2i8_nxv2i8( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2i8( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i8( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void 
@llvm.riscv.vsse.mask.nxv2i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i8( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv4i8_nxv4i8( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4i8( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i8( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8i8( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv8i8_nxv8i8( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8i8( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i8( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16i8( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv16i8_nxv16i8( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16i8( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16i8( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv32i8( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv32i8_nxv32i8( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv32i8( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv32i8( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv32i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv64i8( + , + *, + i32, + i32); + +define void @intrinsic_vsse_v_nxv64i8_nxv64i8( %0, * %1, i32 %2, i32%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + 
call void @llvm.riscv.vsse.nxv64i8( + %0, + * %1, + i32 %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv64i8( + , + *, + i32, + , + i32); + +define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8( %0, * %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv64i8( + %0, + * %1, + i32 %2, + %3, + i32 %4) + + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll @@ -0,0 +1,1555 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare void @llvm.riscv.vsse.nxv1i64( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv1i64_nxv1i64( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1i64( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1i64( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1i64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i64( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv2i64_nxv2i64( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2i64( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i64( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2i64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i64( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv4i64_nxv4i64( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4i64( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i64( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4i64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8i64( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv8i64_nxv8i64( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1 + 
call void @llvm.riscv.vsse.nxv8i64( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i64( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8i64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1f64( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv1f64_nxv1f64( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1f64( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1f64( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1f64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2f64( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv2f64_nxv2f64( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2f64( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2f64( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, a2, e64,m2,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2f64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4f64( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv4f64_nxv4f64( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4f64( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4f64( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, a2, e64,m4,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4f64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8f64( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv8f64_nxv8f64( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, a2, e64,m8,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8f64( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8f64( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64 +; CHECK: vsetvli 
{{.*}}, a2, e64,m8,ta,mu +; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8f64( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1i32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv1i32_nxv1i32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1i32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1i32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv2i32_nxv2i32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2i32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv4i32_nxv4i32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4i32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8i32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv8i32_nxv8i32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8i32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16i32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv16i32_nxv16i32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16i32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16i32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16i32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1f32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv1f32_nxv1f32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1f32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1f32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, a2, e32,mf2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2f32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv2f32_nxv2f32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2f32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2f32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, a2, e32,m1,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4f32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv4f32_nxv4f32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4f32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4f32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, a2, e32,m2,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8f32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv8f32_nxv8f32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8f32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8f32( + , + *, + i64, + , + i64); + +define void 
@intrinsic_vsse_mask_v_nxv8f32_nxv8f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, a2, e32,m4,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16f32( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv16f32_nxv16f32( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16f32( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16f32( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, a2, e32,m8,ta,mu +; CHECK: vsse32.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16f32( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1i16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv1i16_nxv1i16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1i16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1i16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv2i16_nxv2i16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2i16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv4i16_nxv4i16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4i16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret 
void +} + +declare void @llvm.riscv.vsse.nxv8i16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv8i16_nxv8i16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8i16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16i16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv16i16_nxv16i16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16i16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16i16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv32i16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv32i16_nxv32i16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv32i16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv32i16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv32i16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1f16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv1f16_nxv1f16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1f16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1f16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2f16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv2f16_nxv2f16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void 
@llvm.riscv.vsse.nxv2f16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2f16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, a2, e16,mf2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4f16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv4f16_nxv4f16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4f16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4f16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, a2, e16,m1,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8f16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv8f16_nxv8f16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8f16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8f16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, a2, e16,m2,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16f16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv16f16_nxv16f16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16f16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16f16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, a2, e16,m4,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv32f16( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv32f16_nxv32f16( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv32f16( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv32f16( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16 +; CHECK: 
vsetvli {{.*}}, a2, e16,m8,ta,mu +; CHECK: vsse16.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv32f16( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv1i8( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv1i8_nxv1i8( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv1i8( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv1i8( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv1i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv2i8( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv2i8_nxv2i8( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv2i8( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv2i8( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf4,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv2i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv4i8( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv4i8_nxv4i8( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv4i8( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv4i8( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a2, e8,mf2,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv4i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv8i8( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv8i8_nxv8i8( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv8i8( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv8i8( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a2, e8,m1,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv8i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv16i8( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv16i8_nxv16i8( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8 +; CHECK: 
vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv16i8( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv16i8( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a2, e8,m2,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv16i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv32i8( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv32i8_nxv32i8( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv32i8( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv32i8( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a2, e8,m4,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv32i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsse.nxv64i8( + , + *, + i64, + i64); + +define void @intrinsic_vsse_v_nxv64i8_nxv64i8( %0, * %1, i64 %2, i64%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1 + call void @llvm.riscv.vsse.nxv64i8( + %0, + * %1, + i64 %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsse.mask.nxv64i8( + , + *, + i64, + , + i64); + +define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8( %0, * %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a2, e8,m8,ta,mu +; CHECK: vsse8.v {{v[0-9]+}}, (a0), a1, v0.t + call void @llvm.riscv.vsse.mask.nxv64i8( + %0, + * %1, + i64 %2, + %3, + i64 %4) + + ret void +}
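
For reference, a minimal sketch of the nxv1i8 vlse test pair with the scalable-vector types written out explicitly. The types here are inferred from the `nxv1i8` suffix as `<vscale x 1 x i8>` (mask type `<vscale x 1 x i1>`), and the operand orders follow the strided-load intrinsic signatures this patch defines: (pointer, stride, vl) for the unmasked form and (maskedoff, pointer, stride, mask, vl) for the masked form. This is a reconstruction, not additional patch content:

declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
  <vscale x 1 x i8>*,
  i64,
  i64);

define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu
; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1
  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
    <vscale x 1 x i8>* %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, a2, e8,mf8,ta,mu
; CHECK: vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i8> %a
}

The mask operand is materialized in v0, which is why every masked CHECK line expects the `, v0.t` suffix on the generated instruction.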
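
The vsse tests follow the same shape with a void result. A sketch of the rv64 nxv1i64 pair, again with `<vscale x 1 x i64>` inferred from the intrinsic name and the (vector_in, pointer, stride, vl) / (vector_in, pointer, stride, mask, vl) operand orders; on riscv32 the stride and vl operands are i32 rather than i64, matching XLen, which is the only difference between the rv32 and rv64 test files:

declare void @llvm.riscv.vsse.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  i64,
  i64);

define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, i64 %3) nounwind {
entry:
; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64
; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu
; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1
  call void @llvm.riscv.vsse.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    i64 %2,
    i64 %3)

  ret void
}

declare void @llvm.riscv.vsse.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>*,
  i64,
  <vscale x 1 x i1>,
  i64);

define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64
; CHECK: vsetvli {{.*}}, a2, e64,m1,ta,mu
; CHECK: vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
  call void @llvm.riscv.vsse.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64>* %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret void
}

Every other test in these files is the same pattern instantiated for a different element type and LMUL; the `vsetvli` CHECK line encodes the expected SEW/LMUL pair for that type (for example nxv1i8 uses e8,mf8 and nxv8i64 uses e64,m8).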