diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -98,5 +98,7 @@
 }
 
   defm vadd : riscv_binary;
+  defm vsub : riscv_binary;
+  defm vrsub : riscv_binary;
 
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -248,94 +248,156 @@
   defm "" : VPseudoBinaryV_VI;
 }
 
+multiclass VPseudoBinary_VV_VX {
+  defm "" : VPseudoBinaryV_VV;
+  defm "" : VPseudoBinaryV_VX;
+}
+
+multiclass VPseudoBinary_VX_VI {
+  defm "" : VPseudoBinaryV_VX;
+  defm "" : VPseudoBinaryV_VI;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the different patterns.
 //===----------------------------------------------------------------------===//
+class VPatBinarySDNode<SDNode vop,
+                       string instruction_name,
+                       ValueType result_type,
+                       ValueType op_type,
+                       int sew,
+                       LMULInfo vlmul,
+                       VReg op_reg_class> :
+  Pat<(result_type (vop
+                    (op_type op_reg_class:$rs1),
+                    (op_type op_reg_class:$rs2))),
+      (!cast<Instruction>(instruction_name#"_VV_"# vlmul.MX)
+                    op_reg_class:$rs1,
+                    op_reg_class:$rs2,
+                    VLMax, sew)>;
+
+multiclass VPatBinarySDNode<SDNode vop, string instruction_name>
+{
+  foreach vti = AllIntegerVectors in
+    def : VPatBinarySDNode<vop, instruction_name,
+                           vti.Vector, vti.Vector, vti.SEW,
+                           vti.LMul, vti.RegClass>;
+}
 
-multiclass pat_vop_binary<SDNode vop,
-                          string instruction_name,
-                          ValueType result_type,
-                          ValueType op_type,
-                          int sew,
-                          LMULInfo vlmul,
-                          VReg op_reg_class>
+class VPatBinary<string intrinsic_name,
+                 string inst,
+                 string kind,
+                 ValueType result_type,
+                 ValueType op1_type,
+                 ValueType op2_type,
+                 ValueType mask_type,
+                 int sew,
+                 LMULInfo vlmul,
+                 VReg result_reg_class,
+                 VReg op1_reg_class,
+                 DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (i64 GPR:$vl))),
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                    (op1_type op1_reg_class:$rs1),
+                    ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                    (NoX0 GPR:$vl), sew)>;
+
+class VPatBinaryMask<string intrinsic_name,
+                     string inst,
+                     string kind,
+                     ValueType result_type,
+                     ValueType op1_type,
+                     ValueType op2_type,
+                     ValueType mask_type,
+                     int sew,
+                     LMULInfo vlmul,
+                     VReg result_reg_class,
+                     VReg op1_reg_class,
+                     DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
+                    (result_type result_reg_class:$merge),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (mask_type V0),
+                    (i64 GPR:$vl))),
+      (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
+                    (result_type result_reg_class:$merge),
+                    (op1_type op1_reg_class:$rs1),
+                    ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                    (mask_type V0), (NoX0 GPR:$vl), sew)>;
+
+multiclass VPatBinary<string intrinsic_name,
+                      string inst,
+                      string kind,
+                      ValueType result_type,
+                      ValueType op1_type,
+                      ValueType op2_type,
+                      ValueType mask_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg result_reg_class,
+                      VReg op1_reg_class,
+                      DAGOperand op2_kind>
 {
-  defvar instruction = !cast<Instruction>(instruction_name#"_VV_"# vlmul.MX);
-  def : Pat<(result_type (vop
-                          (op_type op_reg_class:$rs1),
-                          (op_type op_reg_class:$rs2))),
-            (instruction op_reg_class:$rs1,
-                         op_reg_class:$rs2,
-                         VLMax, sew)>;
-}
-
-multiclass pat_vop_binary_common<SDNode vop,
-                                 string instruction_name,
-                                 list<VTypeInfo> vtilist>
+  def : VPatBinary<intrinsic_name, inst, kind,
+                   result_type, op1_type, op2_type, mask_type,
+                   sew, vlmul, result_reg_class, op1_reg_class, op2_kind>;
+  def : VPatBinaryMask<intrinsic_name, inst, kind,
+                       result_type, op1_type, op2_type, mask_type,
+                       sew, vlmul, result_reg_class, op1_reg_class, op2_kind>;
+}
+
+multiclass VPatBinaryV_VV<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinary<intrinsic, instruction, "VV",
+                      vti.Vector, vti.Vector, vti.Vector, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                      vti.RegClass>;
+}
+
+multiclass VPatBinaryV_VX<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinary<intrinsic, instruction, "VX",
+                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                      GPR>;
+}
+
+multiclass VPatBinaryV_VI<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinary<intrinsic, instruction, "VI",
+                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                      vti.SEW, vti.LMul, vti.RegClass, vti.RegClass,
+                      simm5>;
+}
+
+multiclass VPatBinary_VV_VX_VI<string intrinsic, string instruction>
 {
-  foreach vti = vtilist in
-  defm : pat_vop_binary<vop, instruction_name,
-                        vti.Vector, vti.Vector, vti.SEW,
-                        vti.LMul, vti.RegClass>;
-}
-
-multiclass pat_intrinsic_binary<string intrinsic_name,
-                                string instruction_name,
-                                string kind,
-                                ValueType result_type,
-                                ValueType op1_type,
-                                ValueType op2_type,
-                                ValueType mask_type,
-                                int sew,
-                                LMULInfo vlmul,
-                                VReg result_reg_class,
-                                VReg op1_reg_class,
-                                DAGOperand op2_kind>
+  defm "" : VPatBinaryV_VV<intrinsic, instruction>;
+  defm "" : VPatBinaryV_VX<intrinsic, instruction>;
+  defm "" : VPatBinaryV_VI<intrinsic, instruction>;
+}
+
+multiclass VPatBinary_VV_VX<string intrinsic, string instruction>
 {
-  defvar inst = !cast<Instruction>(instruction_name#"_"#kind#"_"# vlmul.MX);
-  defvar inst_mask = !cast<Instruction>(instruction_name#"_"#kind#"_"# vlmul.MX#"_MASK");
-
-  def : Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
-                          (op1_type op1_reg_class:$rs1),
-                          (op2_type op2_kind:$rs2),
-                          (i64 GPR:$vl))),
-            (inst (op1_type op1_reg_class:$rs1),
-                  ToFPR32<op2_type, op2_kind, "rs2">.ret,
-                  (NoX0 GPR:$vl), sew)>;
-
-  def : Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
-                          (result_type result_reg_class:$merge),
-                          (op1_type op1_reg_class:$rs1),
-                          (op2_type op2_kind:$rs2),
-                          (mask_type V0),
-                          (i64 GPR:$vl))),
-            (inst_mask (result_type result_reg_class:$merge),
-                       (op1_type op1_reg_class:$rs1),
-                       ToFPR32<op2_type, op2_kind, "rs2">.ret,
-                       (mask_type V0), (NoX0 GPR:$vl), sew)>;
-}
-
-multiclass pat_intrinsic_binary_int_v_vv_vx_vi + defm "" : VPatBinaryV_VV; + defm "" : VPatBinaryV_VX; +} + +multiclass VPatBinary_VX_VI { - foreach vti = AllIntegerVectors in - { - defm : pat_intrinsic_binary; - defm : pat_intrinsic_binary; - defm : pat_intrinsic_binary; - } + defm "" : VPatBinaryV_VX; + defm "" : VPatBinaryV_VI; } //===----------------------------------------------------------------------===// @@ -447,14 +509,18 @@ // Pseudo instructions. defm PseudoVADD : VPseudoBinary_VV_VX_VI; +defm PseudoVSUB : VPseudoBinary_VV_VX; +defm PseudoVRSUB : VPseudoBinary_VX_VI; //===----------------------------------------------------------------------===// // Patterns. //===----------------------------------------------------------------------===// // Whole-register vector patterns. -defm "" : pat_vop_binary_common; +defm "" : VPatBinarySDNode; -defm "" : pat_intrinsic_binary_int_v_vv_vx_vi<"int_riscv_vadd", "PseudoVADD">; +defm "" : VPatBinary_VV_VX_VI<"int_riscv_vadd", "PseudoVADD">; +defm "" : VPatBinary_VV_VX<"int_riscv_vsub", "PseudoVSUB">; +defm "" : VPatBinary_VX_VI<"int_riscv_vrsub", "PseudoVRSUB">; } // Predicates = [HasStdExtV] diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vrsub.ll @@ -0,0 +1,1499 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s + +declare @llvm.riscv.vrsub.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv1i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv2i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv4i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8() nounwind { +entry: 
+; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv8i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv16i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv32i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv64i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: 
vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv1i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv2i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv4i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv8i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv16i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vrsub.vx {{v[0-9]+}}, 
{{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv32i16.i16( + , + i16, + i64); + +define @intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv32i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv32i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv1i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv2i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv4i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = 
call @llvm.riscv.vrsub.nxv8i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv16i32.i32( + , + i32, + i64); + +define @intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv16i32.i32( + undef, + i32 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv16i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( + undef, + undef, + i32 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv1i64.i64( + undef, + i64 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i64.i64( + undef, + undef, + i64 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv2i64.i64( + undef, + i64 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i64.i64( + undef, + undef, + i64 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv4i64.i64( + undef, + i64 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call 
@llvm.riscv.vrsub.mask.nxv4i64.i64( + undef, + undef, + i64 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.nxv8i64.i64( + , + i64, + i64); + +define @intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vrsub.nxv8i64.i64( + undef, + i64 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vrsub.mask.nxv8i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vrsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i64.i64( + undef, + undef, + i64 undef, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv1i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv2i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv4i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv8i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 
9 + %a = call @llvm.riscv.vrsub.nxv16i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv32i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv32i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv64i8.i8( + undef, + i8 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv64i8.i8( + undef, + undef, + i8 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv1i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv2i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv4i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i16.i16( + undef, + undef, + i16 9, + undef, + 
i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv8i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv16i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv32i16.i16( + undef, + i16 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv32i16.i16( + undef, + undef, + i16 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv1i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv2i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv4i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define 
@intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv8i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv16i32.i32( + undef, + i32 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv16i32.i32( + undef, + undef, + i32 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv1i64.i64( + undef, + i64 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv1i64.i64( + undef, + undef, + i64 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv2i64.i64( + undef, + i64 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv2i64.i64( + undef, + undef, + i64 9, + undef, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv4i64.i64( + undef, + i64 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv4i64.i64( + undef, + undef, + i64 9, + undef, + i64 undef) + + ret %a +} + +define 
@intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_vi_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vrsub.nxv8i64.i64( + undef, + i64 9, + i64 undef) + + ret %a +} + +define @intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t + %a = call @llvm.riscv.vrsub.mask.nxv8i64.i64( + undef, + undef, + i64 9, + undef, + i64 undef) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub.ll b/llvm/test/CodeGen/RISCV/rvv/vsub.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsub.ll @@ -0,0 +1,1763 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s + +declare @llvm.riscv.vsub.nxv1i8.nxv1i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i8.nxv1i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i8.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i8.nxv1i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i8.nxv2i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i8.nxv2i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i8.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i8.nxv2i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i8.nxv4i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i8.nxv4i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i8.nxv4i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i8.nxv8i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv8i8_nxv8i8_nxv8i8 
+; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i8.nxv8i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i8.nxv8i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i8.nxv16i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i8.nxv16i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i8.nxv16i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv32i8.nxv32i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv32i8.nxv32i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv32i8.nxv32i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv64i8.nxv64i8( + , + , + i64); + +define @intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv64i8.nxv64i8( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv64i8.nxv64i8( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv64i8.nxv64i8( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i16.nxv1i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i16.nxv1i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16() nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i16.nxv1i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i16.nxv2i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i16.nxv2i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i16.nxv2i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i16.nxv4i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i16.nxv4i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i16.nxv4i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i16.nxv8i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i16.nxv8i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i16.nxv8i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i16.nxv16i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i16.nxv16i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i16.nxv16i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare 
@llvm.riscv.vsub.nxv32i16.nxv32i16( + , + , + i64); + +define @intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv32i16.nxv32i16( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv32i16.nxv32i16( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i32.nxv1i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i32.nxv1i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i32.nxv1i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i32.nxv2i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i32.nxv2i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i32.nxv2i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i32.nxv4i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i32.nxv4i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i32.nxv4i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i32.nxv8i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vsub.nxv8i32.nxv8i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i32.nxv8i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i32.nxv16i32( + , + , + i64); + +define @intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv16i32.nxv16i32( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i32.nxv16i32( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i64.nxv1i64( + , + , + i64); + +define @intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv1i64.nxv1i64( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i64.nxv1i64( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i64.nxv2i64( + , + , + i64); + +define @intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv2i64.nxv2i64( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i64.nxv2i64( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i64.nxv4i64( + , + , + i64); + +define @intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv4i64.nxv4i64( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv4i64_nxv4i64_nxv4i64 +; CHECK: 
vsetvli {{.*}}, a0, e64,m4 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i64.nxv4i64( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i64.nxv8i64( + , + , + i64); + +define @intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vsub.nxv8i64.nxv8i64( + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( + , + , + , + , + i64); + +define @intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vv_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8 +; CHECK: vsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i64.nxv8i64( + undef, + undef, + undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vsub.nxv1i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vsub.nxv2i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vsub.mask.nxv2i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vsub.nxv4i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vsub.mask.nxv4i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vsub.nxv8i8.i8( + undef, 
+ i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vsub.mask.nxv8i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vsub.nxv16i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vsub.mask.nxv16i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vsub.nxv32i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vsub.mask.nxv32i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv64i8.i8( + , + i8, + i64); + +define @intrinsic_vsub_vx_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vsub.nxv64i8.i8( + undef, + i8 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv64i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, a0, e8,m8 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vsub.mask.nxv64i8.i8( + undef, + undef, + i8 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vsub_vx_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0 + %a = call @llvm.riscv.vsub.nxv1i16.i16( + undef, + i16 undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16() nounwind { +entry: +; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4 +; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t + %a = call @llvm.riscv.vsub.mask.nxv1i16.i16( + undef, + undef, + i16 undef, + undef, + i64 undef) + + ret %a +} + +declare @llvm.riscv.vsub.nxv2i16.i16( + , + i16, + i64); + +define 
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+    <vscale x 2 x i16> undef,
+    i16 undef,
+    i64 undef)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+    <vscale x 2 x i16> undef,
+    <vscale x 2 x i16> undef,
+    i16 undef,
+    <vscale x 2 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsub_vx_nxv4i16_nxv4i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m1
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+    <vscale x 4 x i16> undef,
+    i16 undef,
+    i64 undef)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m1
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+    <vscale x 4 x i16> undef,
+    <vscale x 4 x i16> undef,
+    i16 undef,
+    <vscale x 4 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsub_vx_nxv8i16_nxv8i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+    <vscale x 8 x i16> undef,
+    i16 undef,
+    i64 undef)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+    <vscale x 8 x i16> undef,
+    <vscale x 8 x i16> undef,
+    i16 undef,
+    <vscale x 8 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsub_vx_nxv16i16_nxv16i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m4
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+    <vscale x 16 x i16> undef,
+    i16 undef,
+    i64 undef)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m4
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+    <vscale x 16 x i16> undef,
+    <vscale x 16 x i16> undef,
+    i16 undef,
+    <vscale x 16 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsub_vx_nxv32i16_nxv32i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m8
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+    <vscale x 32 x i16> undef,
+    i16 undef,
+    i64 undef)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, a0, e16,m8
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+    <vscale x 32 x i16> undef,
+    <vscale x 32 x i16> undef,
+    i16 undef,
+    <vscale x 32 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsub_vx_nxv1i32_nxv1i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,mf2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+    <vscale x 1 x i32> undef,
+    i32 undef,
+    i64 undef)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,mf2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+    <vscale x 1 x i32> undef,
+    <vscale x 1 x i32> undef,
+    i32 undef,
+    <vscale x 1 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsub_vx_nxv2i32_nxv2i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m1
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+    <vscale x 2 x i32> undef,
+    i32 undef,
+    i64 undef)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m1
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+    <vscale x 2 x i32> undef,
+    <vscale x 2 x i32> undef,
+    i32 undef,
+    <vscale x 2 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsub_vx_nxv4i32_nxv4i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
+    i32 undef,
+    i64 undef)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+    <vscale x 4 x i32> undef,
+    <vscale x 4 x i32> undef,
+    i32 undef,
+    <vscale x 4 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsub_vx_nxv8i32_nxv8i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m4
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
+    i32 undef,
+    i64 undef)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m4
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+    <vscale x 8 x i32> undef,
+    <vscale x 8 x i32> undef,
+    i32 undef,
+    <vscale x 8 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsub_vx_nxv16i32_nxv16i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m8
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
+    i32 undef,
+    i64 undef)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, a0, e32,m8
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+    <vscale x 16 x i32> undef,
+    <vscale x 16 x i32> undef,
+    i32 undef,
+    <vscale x 16 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsub_vx_nxv1i64_nxv1i64_i64() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m1
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    i64 undef,
+    i64 undef)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m1
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
+    <vscale x 1 x i64> undef,
+    <vscale x 1 x i64> undef,
+    i64 undef,
+    <vscale x 1 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsub_vx_nxv2i64_nxv2i64_i64() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
+    i64 undef,
+    i64 undef)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m2
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
+    <vscale x 2 x i64> undef,
+    <vscale x 2 x i64> undef,
+    i64 undef,
+    <vscale x 2 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsub_vx_nxv4i64_nxv4i64_i64() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m4
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
+    i64 undef,
+    i64 undef)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m4
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> undef,
+    <vscale x 4 x i64> undef,
+    i64 undef,
+    <vscale x 4 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsub_vx_nxv8i64_nxv8i64_i64() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m8
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
+    i64 undef,
+    i64 undef)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64() nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsub_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, a0, e64,m8
+; CHECK: vsub.vx {{v[0-9]+}}, {{v[0-9]+}}, a0, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
+    <vscale x 8 x i64> undef,
+    <vscale x 8 x i64> undef,
+    i64 undef,
+    <vscale x 8 x i1> undef,
+    i64 undef)
+
+  ret <vscale x 8 x i64> %a
+}