diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -96,13 +96,34 @@
                     [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 3;
   }
+  class RVVTernaryAAAXNoMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 3;
+  }
+  class RVVTernaryAAAXMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty,
+                     llvm_anyvector_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 3;
+  }

   multiclass RISCVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
   }
+  multiclass RISCVTernaryAAAX {
+    def "int_riscv_" # NAME : RVVTernaryAAAXNoMask;
+    def "int_riscv_" # NAME # "_mask" : RVVTernaryAAAXMask;
+  }

   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;
   defm vrsub : RISCVBinaryAAX;
+
+  defm vslideup : RISCVTernaryAAAX;
+  defm vslidedown : RISCVTernaryAAAX;
 } // TargetPrefix = "riscv"
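
For orientation, the new ternary intrinsic shape is easiest to read at an IR call site: the operands are the merge/destination vector (whose lanes survive wherever the slide does not write), the source vector, the slide offset (the llvm_any_ty operand 3, which the ExtendOperand field marks so lowering can promote it to XLen), and the requested vector length. The sketch below is illustrative only; the nxv2i32 type and the value names are assumptions, not taken from the patch:

declare <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i32> @slideup_sketch(<vscale x 2 x i32> %dest, <vscale x 2 x i32> %src, i32 %offset, i32 %vl) nounwind {
entry:
  ; Result lanes [%offset, %vl) are taken from %src, shifted up by
  ; %offset; lanes below %offset are kept from %dest.
  %a = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i32(
    <vscale x 2 x i32> %dest,
    <vscale x 2 x i32> %src,
    i32 %offset,
    i32 %vl)
  ret <vscale x 2 x i32> %a
}

The masked variants take an additional mask vector before the vl operand, as the tests further down exercise.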
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -83,18 +83,22 @@
 // Vector register and vector group type information.
 //===----------------------------------------------------------------------===//

-class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M>
+class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
+                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
 {
   ValueType Vector = Vec;
   ValueType Mask = Mas;
   int SEW = Sew;
   VReg RegClass = Reg;
   LMULInfo LMul = M;
+  ValueType Scalar = Scal;
+  RegisterClass ScalarRegClass = ScalarReg;
 }

-class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
-                     VReg Reg, LMULInfo M>
-    : VTypeInfo<Vec, Mas, Sew, Reg, M>
+class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
+                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
+                     RegisterClass ScalarReg = GPR>
+    : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
 {
   ValueType VectorM1 = VecM1;
 }
@@ -128,6 +132,42 @@
     def : GroupVTypeInfo<vint64m4_t, vint64m1_t, vbool16_t, 64, VRM4, V_M4>;
     def : GroupVTypeInfo<vint64m8_t, vint64m1_t, vbool8_t,  64, VRM8, V_M8>;
   }
+
+  defset list<VTypeInfo> AllFloatVectors = {
+    defset list<VTypeInfo> NoGroupFloatVectors = {
+      def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
+      def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
+      def VF16M1:  VTypeInfo<vfloat16m1_t,  vbool16_t, 16, VR, V_M1,  f16, FPR16>;
+
+      def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, VR, V_MF2, f32, FPR32>;
+      def VF32M1:  VTypeInfo<vfloat32m1_t,  vbool32_t, 32, VR, V_M1,  f32, FPR32>;
+
+      def VF64M1: VTypeInfo<vfloat64m1_t, vbool64_t, 64, VR, V_M1, f64, FPR64>;
+    }
+
+    defset list<GroupVTypeInfo> GroupFloatVectors = {
+      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t, 16,
+                                 VRM2, V_M2, f16, FPR16>;
+      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t, 16,
+                                 VRM4, V_M4, f16, FPR16>;
+      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t, 16,
+                                 VRM8, V_M8, f16, FPR16>;
+
+      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
+                                 VRM2, V_M2, f32, FPR32>;
+      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
+                                 VRM4, V_M4, f32, FPR32>;
+      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
+                                 VRM8, V_M8, f32, FPR32>;
+
+      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
+                                 VRM2, V_M2, f64, FPR64>;
+      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
+                                 VRM4, V_M4, f64, FPR64>;
+      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
+                                 VRM8, V_M8, f64, FPR64>;
+    }
+  }
 }

 // This class holds the record of the RISCVVPseudoTable below.
@@ -237,6 +277,27 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

+class VPseudoTernaryNoMask<VReg RetClass,
+                           VReg Op1Class,
+                           DAGOperand Op2Class> :
+        Pseudo<(outs RetClass:$rd),
+               (ins RetClass:$rs3, Op1Class:$rs1, Op2Class:$rs2,
+                    GPR:$vl, ixlenimm:$sew),
+               []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $rs3";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let MergeOpIndex = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoBinary<VReg RetClass,
@@ ... @@
 }

+multiclass VPseudoTernary<VReg RetClass,
+                          VReg Op1Class,
+                          DAGOperand Op2Class,
+                          LMULInfo MInfo> {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class,
+                                                     Op2Class>;
+  }
+}
+
+multiclass VPseudoTernaryV_VX {
+  foreach m = MxList.m in
+    defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m>;
+}
+
+multiclass VPseudoTernaryV_VI<Operand ImmType = simm5> {
+  foreach m = MxList.m in
+    defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m>;
+}
+
+multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5> {
+  defm "" : VPseudoTernaryV_VX;
+  defm "" : VPseudoTernaryV_VI<ImmType>;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the different patterns.
 //===----------------------------------------------------------------------===//
@@ -349,6 +435,54 @@
                    ToFPR32<op1_type, op1_reg_class, "rs1">.ret,
                    (mask_type V0), (NoX0 GPR:$vl), sew)>;

+class VPatTernaryNoMask<string intrinsic,
+                        string inst,
+                        string kind,
+                        ValueType result_type,
+                        ValueType op1_type,
+                        ValueType op2_type,
+                        ValueType mask_type,
+                        int sew,
+                        LMULInfo vlmul,
+                        VReg result_reg_class,
+                        RegisterClass op1_reg_class,
+                        DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic)
+                   (result_type result_reg_class:$rs3),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (XLenVT GPR:$vl))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                   result_reg_class:$rs3,
+                   ToFPR32<op1_type, op1_reg_class, "rs1">.ret,
+                   op2_kind:$rs2,
+                   (NoX0 GPR:$vl), sew)>;
+
+class VPatTernaryMask<string intrinsic,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op1_type,
+                      ValueType op2_type,
+                      ValueType mask_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg result_reg_class,
+                      RegisterClass op1_reg_class,
+                      DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
+                   (result_type result_reg_class:$rs3),
+                   (op1_type op1_reg_class:$rs1),
+                   (op2_type op2_kind:$rs2),
+                   (mask_type V0),
+                   (XLenVT GPR:$vl))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                   result_reg_class:$rs3,
+                   ToFPR32<op1_type, op1_reg_class, "rs1">.ret,
+                   op2_kind:$rs2,
+                   (mask_type V0),
+                   (NoX0 GPR:$vl), sew)>;
+
 multiclass VPatBinary<string intrinsic,
@@ ... @@
 }

+multiclass VPatTernary<string intrinsic,
+                       string inst,
+                       string kind,
+                       ValueType result_type,
+                       ValueType op1_type,
+                       ValueType op2_type,
+                       ValueType mask_type,
+                       int sew,
+                       LMULInfo vlmul,
+                       VReg result_reg_class,
+                       RegisterClass op1_reg_class,
+                       DAGOperand op2_kind> {
+  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type,
+                          op2_type, mask_type, sew, vlmul, result_reg_class,
+                          op1_reg_class, op2_kind>;
+  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type,
+                        op2_type, mask_type, sew, vlmul, result_reg_class,
+                        op1_reg_class, op2_kind>;
+}
+
+multiclass VPatTernaryV_VX<string intrinsic, string instruction,
+                           list<VTypeInfo> vtilist> {
+  foreach vti = vtilist in
+    defm : VPatTernary<intrinsic, instruction, "VX",
+                       vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                       vti.SEW, vti.LMul, vti.RegClass,
+                       vti.RegClass, GPR>;
+}
+
+multiclass VPatTernaryV_VI<string intrinsic, string instruction,
+                           list<VTypeInfo> vtilist, Operand Imm_type> {
+  foreach vti = vtilist in
+    defm : VPatTernary<intrinsic, instruction, "VI",
+                       vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                       vti.SEW, vti.LMul, vti.RegClass,
+                       vti.RegClass, Imm_type>;
+}
+
+multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
+                              list<VTypeInfo> vtilist,
+                              Operand Imm_type = simm5> {
+  defm "" : VPatTernaryV_VX<intrinsic, instruction, vtilist>;
+  defm "" : VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
+}
+
 //===----------------------------------------------------------------------===//
 // Pseudo instructions and patterns.
 //===----------------------------------------------------------------------===//
@@ -543,6 +721,18 @@
 defm PseudoVRSUB : VPseudoBinaryV_VX_VI;

 //===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 17. Vector Permutation Instructions
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 17.3. Vector Slide Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVSLIDEUP : VPseudoTernaryV_VX_VI<uimm5>;
+defm PseudoVSLIDEDOWN : VPseudoTernaryV_VX_VI<uimm5>;
+
+//===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -560,4 +750,14 @@
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
 defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;

+//===----------------------------------------------------------------------===//
+// 17. Vector Permutation Instructions
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 17.3. Vector Slide Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllVectors, uimm5>;
+defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllVectors, uimm5>;
+
 } // Predicates = [HasStdExtV]
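
To make the multiclass stack concrete, here is roughly what one instantiation stamps out at LMUL=1 (a hand expansion for illustration; the exact VPseudoBinaryMask parameter list is abbreviated, and VR is the m1 vector register class that MxList supplies for m.vrclass):

// defm PseudoVSLIDEUP : VPseudoTernaryV_VX_VI<uimm5>; for m = V_M1 yields:
let VLMul = V_M1.value in {
  // Unmasked scalar-offset form; $rd is tied to the merge operand $rs3.
  def PseudoVSLIDEUP_VX_M1 : VPseudoTernaryNoMask<VR, VR, GPR>;
  // Masked form reuses the existing binary masked pseudo (mask in v0).
  def PseudoVSLIDEUP_VX_M1_MASK : VPseudoBinaryMask<VR, VR, GPR>;
}
// The _VI pair is identical with uimm5 in place of GPR, and the
// VPatTernary* multiclasses then select these pseudos for every
// int_riscv_vslideup/vslidedown call whose types appear in AllVectors.

The same expansion repeats for every LMUL in MxList and for PseudoVSLIDEDOWN, whose tests follow.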
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
@@ -0,0 +1,1705 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, a0, e8,mf8
+; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0,
v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv2i8_nxv2i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv2i8.i8( + %0, + %1, + i8 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv4i8.i8( + , + , + i8, + i32); + +define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv4i8.i8( + %0, + %1, + i8 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv4i8_nxv4i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv4i8.i8( + %0, + %1, + i8 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv8i8.i8( + , + , + i8, + i32); + +define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv8i8.i8( + %0, + %1, + i8 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv8i8_nxv8i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv8i8.i8( + %0, + %1, + i8 9, + i32 %2) + + ret %a 
+} + +define @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv16i8.i8( + , + , + i8, + i32); + +define @intrinsic_vslidedown_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv16i8.i8( + %0, + %1, + i8 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv16i8_nxv16i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv16i8.i8( + %0, + %1, + i8 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv32i8.i8( + , + , + i8, + i32); + +define @intrinsic_vslidedown_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv32i8.i8( + %0, + %1, + i8 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv32i8_nxv32i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv32i8.i8( + %0, + %1, + i8 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vslidedown.nxv1i16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv1i16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv1i16_nxv1i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv1i16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv2i16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv2i16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv2i16_nxv2i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv2i16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv4i16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = 
call @llvm.riscv.vslidedown.nxv4i16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv4i16_nxv4i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv4i16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv8i16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv8i16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv8i16_nxv8i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv8i16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv16i16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv16i16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv16i16_nxv16i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv16i16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv1i32.i32( + , + , + i32, + i32); + +define @intrinsic_vslidedown_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv1i32.i32( + %0, + %1, + i32 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv1i32_nxv1i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv1i32.i32( + %0, + %1, + i32 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv2i32.i32( + , + , + i32, + i32); + +define @intrinsic_vslidedown_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv2i32.i32( + %0, + %1, + i32 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv2i32_nxv2i32_i32( 
%0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv2i32.i32( + %0, + %1, + i32 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv4i32.i32( + , + , + i32, + i32); + +define @intrinsic_vslidedown_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv4i32.i32( + %0, + %1, + i32 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv4i32_nxv4i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv4i32.i32( + %0, + %1, + i32 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv8i32.i32( + , + , + i32, + i32); + +define @intrinsic_vslidedown_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv8i32.i32( + %0, + %1, + i32 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv8i32_nxv8i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv8i32.i32( + %0, + %1, + i32 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32_i32( 
%0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv1f16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv1f16_nxv1f16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv1f16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv1f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv1f16_nxv1f16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv1f16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv2f16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv2f16_nxv2f16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv2f16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv2f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv2f16_nxv2f16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv2f16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vslidedown.nxv4f16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv4f16_nxv4f16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv4f16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv4f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv4f16_nxv4f16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv4f16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv8f16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv8f16_nxv8f16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv8f16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv8f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv8f16_nxv8f16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv8f16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv16f16.i16( + , + , + i16, + i32); + +define @intrinsic_vslidedown_vx_nxv16f16_nxv16f16_i16( %0, %1, i16 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = 
call @llvm.riscv.vslidedown.nxv16f16.i16( + %0, + %1, + i16 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv16f16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv16f16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv16f16_nxv16f16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv16f16.i16( + %0, + %1, + i16 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv16f16.i16( + %0, + %1, + i16 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv1f32.i32( + , + , + i32, + i32); + +define @intrinsic_vslidedown_vx_nxv1f32_nxv1f32_i32( %0, %1, i32 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv1f32.i32( + %0, + %1, + i32 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv1f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv1f32_nxv1f32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv1f32.i32( + %0, + %1, + i32 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv2f32.i32( + , + , + i32, + i32); + +define @intrinsic_vslidedown_vx_nxv2f32_nxv2f32_i32( %0, %1, i32 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv2f32.i32( + %0, + %1, + i32 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv2f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv2f32_nxv2f32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv2f32.i32( + %0, + %1, + i32 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv4f32.i32( + , + , + i32, + i32); + +define @intrinsic_vslidedown_vx_nxv4f32_nxv4f32_i32( %0, %1, i32 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv4f32.i32( + %0, + %1, + i32 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv4f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv4f32_nxv4f32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv4f32.i32( + %0, + %1, + i32 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv8f32.i32( + , + , + i32, + i32); + +define @intrinsic_vslidedown_vx_nxv8f32_nxv8f32_i32( %0, %1, i32 %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv8f32.i32( + %0, + %1, + i32 %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv8f32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8f32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv8f32_nxv8f32_i32( %0, %1, i32 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv8f32.i32( + %0, + %1, + i32 9, + i32 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8f32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll @@ -0,0 +1,2131 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vslidedown.nxv1i8.i8( + , + , + i8, + i64); + +define @intrinsic_vslidedown_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv1i8.i8( + %0, + %1, + i8 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv1i8_nxv1i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv1i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv1i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv2i8.i8( + , + , + i8, + i64); + +define @intrinsic_vslidedown_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv2i8.i8( + %0, + %1, + i8 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv2i8_nxv2i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vslidedown_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv2i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv4i8.i8( + , + , + i8, + i64); + +define @intrinsic_vslidedown_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv4i8.i8( + %0, + %1, + i8 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv4i8_nxv4i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv4i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.nxv8i8.i8( + , + , + i8, + i64); + +define @intrinsic_vslidedown_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslidedown.nxv8i8.i8( + %0, + %1, + i8 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslidedown.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslidedown.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslidedown_vi_nxv8i8_nxv8i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslidedown.nxv8i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8_i8 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 9,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
[ elided: the same two declarations and four tests (vx, mask_vx, vi with immediate 9, mask_vi) repeat for nxv32i8 (e8,m4); nxv1i16/nxv2i16/nxv4i16/nxv8i16/nxv16i16 (e16, LMUL mf4/mf2/m1/m2/m4); nxv1i32/nxv2i32/nxv4i32/nxv8i32 (e32, mf2/m1/m2/m4); nxv1i64/nxv2i64/nxv4i64 (e64, m1/m2/m4); nxv1f16/nxv2f16/nxv4f16/nxv8f16/nxv16f16 (e16, mf4/mf2/m1/m2/m4); nxv1f32/nxv2f32/nxv4f32/nxv8f32 (e32, mf2/m1/m2/m4); and nxv1f64/nxv2f64 (e64, m1/m2) ]
+declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslidedown.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslidedown.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
@@ -0,0 +1,22 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
@@ -0,0 +1,2131 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslideup_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
{{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv2i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv2i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv4i8.i8( + , + , + i8, + i64); + +define @intrinsic_vslideup_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv4i8.i8( + %0, + %1, + i8 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv4i8_nxv4i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv4i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv4i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv8i8.i8( + , + , + i8, + i64); + +define @intrinsic_vslideup_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv8i8.i8( + %0, + %1, + i8 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv8i8_nxv8i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv8i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv8i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + 
+declare @llvm.riscv.vslideup.nxv16i8.i8( + , + , + i8, + i64); + +define @intrinsic_vslideup_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv16i8.i8( + %0, + %1, + i8 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv16i8_nxv16i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv16i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv16i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv32i8.i8( + , + , + i8, + i64); + +define @intrinsic_vslideup_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv32i8.i8( + %0, + %1, + i8 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv32i8_nxv32i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv32i8.i8( + %0, + %1, + i8 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv32i8.i8( + %0, + %1, + i8 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv1i16.i16( + , + , + i16, + i64); + +define @intrinsic_vslideup_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv1i16.i16( + %0, + %1, + i16 %2, + i64 %3) + + ret %a +} + +declare 
@llvm.riscv.vslideup.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv1i16_nxv1i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv1i16.i16( + %0, + %1, + i16 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv1i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv2i16.i16( + , + , + i16, + i64); + +define @intrinsic_vslideup_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv2i16.i16( + %0, + %1, + i16 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv2i16_nxv2i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv2i16.i16( + %0, + %1, + i16 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv2i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv4i16.i16( + , + , + i16, + i64); + +define @intrinsic_vslideup_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv4i16.i16( + %0, + %1, + i16 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call 
@llvm.riscv.vslideup.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv4i16_nxv4i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv4i16.i16( + %0, + %1, + i16 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv4i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv8i16.i16( + , + , + i16, + i64); + +define @intrinsic_vslideup_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv8i16.i16( + %0, + %1, + i16 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv8i16_nxv8i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv8i16.i16( + %0, + %1, + i16 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv8i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv16i16.i16( + , + , + i16, + i64); + +define @intrinsic_vslideup_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv16i16.i16( + %0, + %1, + i16 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv16i16_nxv16i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call 
@llvm.riscv.vslideup.nxv16i16.i16( + %0, + %1, + i16 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv16i16.i16( + %0, + %1, + i16 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv1i32.i32( + , + , + i32, + i64); + +define @intrinsic_vslideup_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv1i32.i32( + %0, + %1, + i32 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv1i32_nxv1i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv1i32.i32( + %0, + %1, + i32 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv1i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv2i32.i32( + , + , + i32, + i64); + +define @intrinsic_vslideup_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv2i32.i32( + %0, + %1, + i32 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv2i32_nxv2i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv2i32.i32( + %0, + %1, + i32 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call 
@llvm.riscv.vslideup.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv4i32.i32( + , + , + i32, + i64); + +define @intrinsic_vslideup_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv4i32.i32( + %0, + %1, + i32 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv4i32_nxv4i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv4i32.i32( + %0, + %1, + i32 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv4i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv8i32.i32( + , + , + i32, + i64); + +define @intrinsic_vslideup_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vslideup.nxv8i32.i32( + %0, + %1, + i32 %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vslideup_vi_nxv8i32_nxv8i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vslideup.nxv8i32.i32( + %0, + %1, + i32 9, + i64 %2) + + ret %a +} + +define @intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vslideup.mask.nxv8i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vslideup.nxv1i64.i64( + , + , + i64, + i64); + +define @intrinsic_vslideup_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslideup_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: 
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vslideup_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vslideup_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vslideup_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslideup_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vslideup_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i16,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vslideup_vx_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i16 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f16_nxv1f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i16 %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16.i16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16.i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i16 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslideup_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f16_nxv1f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.nxv1f16.i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i16 9,
+    i64 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f16_nxv1f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16.i16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i16,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vslideup_vx_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i16 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f16_nxv2f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i16 %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16.i16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16.i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i16 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslideup_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f16_nxv2f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.nxv2f16.i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i16 9,
+    i64 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f16_nxv2f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16.i16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i16,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vslideup_vx_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i16 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f16_nxv4f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i16 %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16.i16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16.i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i16 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslideup_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f16_nxv4f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.nxv4f16.i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i16 9,
+    i64 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f16_nxv4f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16.i16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i16,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vslideup_vx_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i16 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f16_nxv8f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i16 %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16.i16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16.i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslideup_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f16_nxv8f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.nxv8f16.i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i16 9,
+    i64 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f16_nxv8f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16.i16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i16,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vslideup_vx_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i16 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv16f16_nxv16f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i16 %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16.i16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16.i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslideup_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv16f16_nxv16f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.nxv16f16.i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i16 9,
+    i64 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv16f16_nxv16f16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16.i16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vslideup_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f32_nxv1f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslideup_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f32_nxv1f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 9,
+    i64 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f32_nxv1f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vslideup_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f32_nxv2f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslideup_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f32_nxv2f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 9,
+    i64 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f32_nxv2f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vslideup_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f32_nxv4f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslideup_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f32_nxv4f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 9,
+    i64 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f32_nxv4f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vslideup_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv8f32_nxv8f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslideup_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv8f32_nxv8f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 9,
+    i64 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv8f32_nxv8f32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vslideup_vx_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv1f64_nxv1f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslideup_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv1f64_nxv1f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv1f64_nxv1f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i64,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vslideup_vx_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv2f64_nxv2f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslideup_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv2f64_nxv2f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv2f64_nxv2f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vslideup_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vx_nxv4f64_nxv4f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslideup.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslideup_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_vi_nxv4f64_nxv4f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    i64 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslideup_mask_vi_nxv4f64_nxv4f64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslideup.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x double> %a
+}
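
For reference, a minimal sketch of how the unmasked and masked forms exercised above compose. The function and value names here are hypothetical; the intrinsic signatures match the nxv2i32 declares in the test file, and the comments assume the tail-agnostic, mask-undisturbed ("ta,mu") policy the tests select:

; Illustrative sketch only, not part of the generated tests.
define <vscale x 2 x i32> @example_vslideup_compose(<vscale x 2 x i32> %dst, <vscale x 2 x i32> %src, i32 %off, <vscale x 2 x i1> %mask, i64 %vl) nounwind {
entry:
  ; Unmasked slide: result elements at indices >= %off come from %src;
  ; elements below %off are preserved from the destination operand %dst.
  %u = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i32(
    <vscale x 2 x i32> %u_merge_is_dst,
    <vscale x 2 x i32> %src,
    i32 %off,
    i64 %vl)
  ; Masked slide: under "mu", elements whose mask bit is clear keep the
  ; value of the merge operand (%u); active elements receive slid data.
  %m = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i32(
    <vscale x 2 x i32> %u,
    <vscale x 2 x i32> %src,
    i32 %off,
    <vscale x 2 x i1> %mask,
    i64 %vl)
  ret <vscale x 2 x i32> %m
}

(Here %u_merge_is_dst stands for %dst; the first operand of both intrinsics is the destination/merge vector, matching the tied-destination constraint of the slide pseudoinstructions.)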