diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -446,4 +446,8 @@
   defm vslideup : RISCVTernaryAAAX;
   defm vslidedown : RISCVTernaryAAAX;
+  defm vslide1up : RISCVBinaryAAX;
+  defm vslide1down : RISCVBinaryAAX;
+  defm vfslide1up : RISCVBinaryAAX;
+  defm vfslide1down : RISCVBinaryAAX;
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1751,6 +1751,10 @@
 //===----------------------------------------------------------------------===//
 defm PseudoVSLIDEUP : VPseudoTernaryV_VX_VI;
 defm PseudoVSLIDEDOWN : VPseudoTernaryV_VX_VI;
+defm PseudoVSLIDE1UP : VPseudoBinaryV_VX;
+defm PseudoVSLIDE1DOWN : VPseudoBinaryV_VX;
+defm PseudoVFSLIDE1UP : VPseudoBinaryV_VX;
+defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VX;

 //===----------------------------------------------------------------------===//
 // Patterns.
@@ -2050,9 +2054,13 @@
 let Predicates = [HasStdExtV] in {
   defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllIntegerVectors, uimm5>;
   defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllIntegerVectors, uimm5>;
+  defm "" : VPatBinaryV_VX<"int_riscv_vslide1up", "PseudoVSLIDE1UP", AllIntegerVectors>;
+  defm "" : VPatBinaryV_VX<"int_riscv_vslide1down", "PseudoVSLIDE1DOWN", AllIntegerVectors>;
 } // Predicates = [HasStdExtV]

 let Predicates = [HasStdExtV, HasStdExtF] in {
   defm "" : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllFloatVectors, uimm5>;
   defm "" : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllFloatVectors, uimm5>;
+  defm "" : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP", AllFloatVectors>;
+  defm "" : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
 } // Predicates = [HasStdExtV, HasStdExtF]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
@@ -0,0 +1,441 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+  <vscale x 1 x half>,
+  half,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0
+  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    half %1,
+    i32 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>,
+  half,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1down.mask.nxv1f16.f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    half %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
+  <vscale x 2 x half>,
+  half,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0
+  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.nxv2f16.f16(
+    <vscale x 2 x half> %0,
+    half %1,
+    i32 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>,
+  half,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1down.mask.nxv2f16.f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    half %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
+  <vscale x 4 x half>,
+  half,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0
+  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.nxv4f16.f16(
+    <vscale x 4 x half> %0,
+    half %1,
+    i32 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>,
+  half,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1down.mask.nxv4f16.f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    half %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
+  <vscale x 8 x half>,
+  half,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0
+  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    half %1,
+    i32 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>,
+  half,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1down.mask.nxv8f16.f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    half %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
+  <vscale x 16 x half>,
+  half,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0
+  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    half %1,
+    i32 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>,
+  half,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1down.mask.nxv16f16.f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    half %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfslide1down.nxv32f16.f16(
+  <vscale x 32 x half>,
+  half,
+  i32);
+
+define <vscale x 32 x half> 
@intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32( + %0, + %1, + 
float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll @@ -0,0 +1,601 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfslide1down.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv2f16.f16( + %0, + 
half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: 
vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vfslide1down.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1down.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfslide1down.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1down.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll @@ -0,0 +1,441 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfslide1up.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv2f16.f16( + 
, + , + half, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vfslide1up.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( + %0, + float %1, 
+ i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll @@ -0,0 +1,601 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfslide1up.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + 
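+; A fully-typed sketch of the declaration pattern used throughout this file
+; (illustrative only; the scalable vector types are inferred from the
+; intrinsic-name suffix, e.g. nxv1f16 denotes <vscale x 1 x half>, and rv64
+; tests pass the vl as i64):
+;
+;   declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
+;     <vscale x 1 x half>,  ; source vector
+;     half,                 ; scalar inserted at element 0
+;     i64);                 ; vl
+;   declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
+;     <vscale x 1 x half>,  ; maskedoff vector
+;     <vscale x 1 x half>,  ; source vector
+;     half,                 ; scalar inserted at element 0
+;     <vscale x 1 x i1>,    ; mask
+;     i64);                 ; vl
+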
+declare @llvm.riscv.vfslide1up.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv32f16.f16( + %0, + %1, + half %2, + 
%3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call 
@llvm.riscv.vfslide1up.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: 
vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfslide1up.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0 + %a = call @llvm.riscv.vfslide1up.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfslide1up.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfslide1up.vf {{v[0-9]+}}, {{v[0-9]+}}, ft0, v0.t + %a = call @llvm.riscv.vfslide1up.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll @@ -0,0 +1,721 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vslide1down.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv4i8.i8( + %0, + 
i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv64i8.i8( + , + i8, + i32); + +define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv64i8.i8( + 
%0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv64i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslide1down.vx 
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vslide1down.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vslide1down.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vslide1down.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vslide1down.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vslide1down.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vslide1down.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vslide1down.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vslide1down.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll
b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv64.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vslide1down.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vslide1down_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv16i16.i16( + , + , + i16, + , + 
i64); + +define @intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv4i32.i32( + %0, + 
i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1down.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1down.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1down.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1down.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: 
vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vslide1down.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1down.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vslide1down.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1down.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
@@ -0,0 +1,20 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv64.ll
@@ -0,0 +1,881 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8>
@llvm.riscv.vslide1up.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv16i8.i8( + , + i8, + i64); + +define 
@intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv64i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv64i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv2i16.i16( + , + i16, + i64); + +define 
@intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv32i16.i16( + , + 
i16, + i64); + +define @intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv32i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv32i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vslide1up.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv16i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv16i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vslide1up.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vslide1up.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vslide1up.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t + %a = call @llvm.riscv.vslide1up.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret 
<vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vslide1up.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vslide1up.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vslide1up.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}