diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -113,6 +113,34 @@
                 [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 3;
   }
+  // For binary operations with V0 as input.
+  // Input: (vector_in, vector_in/scalar_in, V0, vl)
+  class RISCVBinaryWithV0
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
+                     llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For binary operations with mask type output and V0 as input.
+  // Output: (mask type output)
+  // Input: (vector_in, vector_in/scalar_in, V0, vl)
+  class RISCVBinaryMOutWithV0
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyvector_ty, llvm_any_ty, LLVMMatchType<0>,
+                     llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }
+  // For binary operations with mask type output.
+  // Output: (mask type output)
+  // Input: (vector_in, vector_in/scalar_in, vl)
+  class RISCVBinaryMOut
+        : Intrinsic<[llvm_anyvector_ty],
+                    [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+                    [IntrNoMem]>, RISCVVIntrinsic {
+    let ExtendOperand = 2;
+  }

   multiclass RISCVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
@@ -122,6 +150,15 @@
     def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
   }
+  multiclass RISCVBinaryWithV0 {
+    def "int_riscv_" # NAME : RISCVBinaryWithV0;
+  }
+  multiclass RISCVBinaryMaskOutWithV0 {
+    def "int_riscv_" # NAME : RISCVBinaryMOutWithV0;
+  }
+  multiclass RISCVBinaryMaskOut {
+    def "int_riscv_" # NAME : RISCVBinaryMOut;
+  }

   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;
@@ -136,4 +173,12 @@
   defm vwsubu_w : RISCVBinaryAAX;
   defm vwsub_w : RISCVBinaryAAX;

+  defm vadc : RISCVBinaryWithV0;
+  defm vmadc_carry_in : RISCVBinaryMaskOutWithV0;
+  defm vmadc : RISCVBinaryMaskOut;
+
+  defm vsbc : RISCVBinaryWithV0;
+  defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
+  defm vmsbc : RISCVBinaryMaskOut;
+
 } // TargetPrefix = "riscv"
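The new entries model the three section 12.4 intrinsic shapes: vadc/vsbc take a mask in V0 and return a vector, vmadc_carry_in/vmsbc_borrow_in take V0 and return a mask, and plain vmadc/vmsbc return a mask with no carry-in. As a usage sketch (not part of this patch; types written out, mangled names following the style of the tests below), a two-limb wide add that propagates the carry out of the low limbs into the high limbs would look roughly like:

; Illustrative IR only: chain vmadc (carry-out) into vadc (carry-in).
declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);
declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @sketch_add_hi_limb(<vscale x 2 x i32> %lo0, <vscale x 2 x i32> %lo1, <vscale x 2 x i32> %hi0, <vscale x 2 x i32> %hi1, i32 %vl) nounwind {
entry:
  ; carry-out of the low limbs (selects vmadc.vv)
  %carry = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
    <vscale x 2 x i32> %lo0,
    <vscale x 2 x i32> %lo1,
    i32 %vl)
  ; high limbs plus carry-in through v0 (selects vadc.vvm)
  %hi = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %hi0,
    <vscale x 2 x i32> %hi1,
    <vscale x 2 x i1> %carry,
    i32 %vl)
  ret <vscale x 2 x i32> %hi
}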
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -277,6 +277,29 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

+class VPseudoBinaryCarryIn<VReg RetClass,
+                           VReg Op1Class,
+                           DAGOperand Op2Class,
+                           LMULInfo MInfo,
+                           bit CarryIn> :
+        Pseudo<(outs RetClass:$rd),
+               !if(!eq(CarryIn, 1),
+                   (ins Op1Class:$rs2, Op2Class:$rs1, VMV0:$carry, GPR:$vl,
+                        ixlenimm:$sew),
+                   (ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew)), []>,
+        RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = !if(!eq(CarryIn, 1), 4, 3);
+  let SEWIndex = !if(!eq(CarryIn, 1), 5, 4);
+  let MergeOpIndex = INVALID_INDEX.V;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+  let VLMul = MInfo.value;
+}
+
 multiclass VPseudoBinary;
 }

+multiclass VPseudoBinaryV_VM<bit CarryOut = 0, bit CarryIn = 1> {
+  foreach m = MxList.m in
+    def "_VV" # !if(!eq(CarryIn, 1), "M", "") # "_" # m.MX :
+      VPseudoBinaryCarryIn<!if(!eq(CarryOut, 1), VR, m.vrclass),
+                           m.vrclass, m.vrclass, m, CarryIn>;
+}
+
+multiclass VPseudoBinaryV_XM<bit CarryOut = 0, bit CarryIn = 1> {
+  foreach m = MxList.m in
+    def "_VX" # !if(!eq(CarryIn, 1), "M", "") # "_" # m.MX :
+      VPseudoBinaryCarryIn<!if(!eq(CarryOut, 1), VR, m.vrclass),
+                           m.vrclass, GPR, m, CarryIn>;
+}
+
+multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1> {
+  foreach m = MxList.m in
+    def "_VI" # !if(!eq(CarryIn, 1), "M", "") # "_" # m.MX :
+      VPseudoBinaryCarryIn<!if(!eq(CarryOut, 1), VR, m.vrclass),
+                           m.vrclass, simm5, m, CarryIn>;
+}
+
 multiclass VPseudoBinaryV_VV_VX_VI {
   defm "" : VPseudoBinaryV_VV;
   defm "" : VPseudoBinaryV_VX;
@@ -362,6 +406,39 @@
   defm "" : VPseudoBinaryW_WX;
 }

+multiclass VPseudoBinaryV_VM_XM_IM {
+  defm "" : VPseudoBinaryV_VM;
+  defm "" : VPseudoBinaryV_XM;
+  defm "" : VPseudoBinaryV_IM;
+}
+
+multiclass VPseudoBinaryV_VM_XM {
+  defm "" : VPseudoBinaryV_VM;
+  defm "" : VPseudoBinaryV_XM;
+}
+
+multiclass VPseudoBinaryM_VM_XM_IM {
+  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1>;
+  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1>;
+  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/1>;
+}
+
+multiclass VPseudoBinaryM_VM_XM {
+  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/1>;
+  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/1>;
+}
+
+multiclass VPseudoBinaryM_V_X_I {
+  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0>;
+  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0>;
+  defm "" : VPseudoBinaryV_IM</*CarryOut=*/1, /*CarryIn=*/0>;
+}
+
+multiclass VPseudoBinaryM_V_X {
+  defm "" : VPseudoBinaryV_VM</*CarryOut=*/1, /*CarryIn=*/0>;
+  defm "" : VPseudoBinaryV_XM</*CarryOut=*/1, /*CarryIn=*/0>;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the different patterns.
 //===----------------------------------------------------------------------===//
@@ -453,6 +530,50 @@
                  op2_kind>;
 }

+multiclass VPatBinaryCarryIn<string intrinsic,
+                             string inst,
+                             string kind,
+                             ValueType result_type,
+                             ValueType op1_type,
+                             ValueType op2_type,
+                             ValueType mask_type,
+                             int sew,
+                             LMULInfo vlmul,
+                             VReg op1_reg_class,
+                             DAGOperand op2_kind>
+{
+  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
+                         (op1_type op1_reg_class:$rs1),
+                         (op2_type op2_kind:$rs2),
+                         (mask_type V0),
+                         (XLenVT GPR:$vl))),
+            (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                         (op1_type op1_reg_class:$rs1),
+                         ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                         (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
+multiclass VPatBinaryMaskOut<string intrinsic,
+                             string inst,
+                             string kind,
+                             ValueType result_type,
+                             ValueType op1_type,
+                             ValueType op2_type,
+                             int sew,
+                             LMULInfo vlmul,
+                             VReg op1_reg_class,
+                             DAGOperand op2_kind>
+{
+  def : Pat<(result_type (!cast<Intrinsic>(intrinsic)
+                         (op1_type op1_reg_class:$rs1),
+                         (op2_type op2_kind:$rs2),
+                         (XLenVT GPR:$vl))),
+            (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
+                         (op1_type op1_reg_class:$rs1),
+                         ToFPR32<op2_type, op2_kind, "rs2">.ret,
+                         (NoX0 GPR:$vl), sew)>;
+}
+
 multiclass VPatBinaryV_VV<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
@@ -524,6 +645,60 @@
   }
 }

+multiclass VPatBinaryV_VM<string intrinsic, string instruction,
+                          bit CarryOut = 0> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryCarryIn<intrinsic, instruction, "VVM",
+                             !if(!eq(CarryOut, 1), vti.Mask, vti.Vector),
+                             vti.Vector, vti.Vector, vti.Mask,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryV_XM<string intrinsic, string instruction,
+                          bit CarryOut = 0> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryCarryIn<intrinsic, instruction, "VXM",
+                             !if(!eq(CarryOut, 1), vti.Mask, vti.Vector),
+                             vti.Vector, XLenVT, vti.Mask,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, GPR>;
+}
+
+multiclass VPatBinaryV_IM<string intrinsic, string instruction,
+                          bit CarryOut = 0> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryCarryIn<intrinsic, instruction, "VIM",
+                             !if(!eq(CarryOut, 1), vti.Mask, vti.Vector),
+                             vti.Vector, XLenVT, vti.Mask,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, simm5>;
+}
+
+multiclass VPatBinaryV_V<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryMaskOut<intrinsic, instruction, "VV",
+                             vti.Mask, vti.Vector, vti.Vector,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryV_X<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryMaskOut<intrinsic, instruction, "VX",
+                             vti.Mask, vti.Vector, XLenVT,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, GPR>;
+}
+
+multiclass VPatBinaryV_I<string intrinsic, string instruction> {
+  foreach vti = AllIntegerVectors in
+    defm : VPatBinaryMaskOut<intrinsic, instruction, "VI",
+                             vti.Mask, vti.Vector, XLenVT,
+                             vti.SEW, vti.LMul,
+                             vti.RegClass, simm5>;
+}
+
 multiclass VPatBinaryV_VV_VX_VI vtilist> {
@@ -558,6 +733,45 @@
   defm "" : VPatBinaryW_WX;
 }

+multiclass VPatBinaryV_VM_XM_IM<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VM<intrinsic, instruction>;
+  defm "" : VPatBinaryV_XM<intrinsic, instruction>;
+  defm "" : VPatBinaryV_IM<intrinsic, instruction>;
+}
+
+multiclass VPatBinaryM_VM_XM_IM<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>;
+  defm "" : VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;
+  defm "" : VPatBinaryV_IM<intrinsic, instruction, /*CarryOut=*/1>;
+}
+
+multiclass VPatBinaryM_V_X_I<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_V<intrinsic, instruction>;
+  defm "" : VPatBinaryV_X<intrinsic, instruction>;
+  defm "" : VPatBinaryV_I<intrinsic, instruction>;
+}
+
+multiclass VPatBinaryV_VM_XM<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VM<intrinsic, instruction>;
+  defm "" : VPatBinaryV_XM<intrinsic, instruction>;
+}
+
+multiclass VPatBinaryM_VM_XM<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_VM<intrinsic, instruction, /*CarryOut=*/1>;
+  defm "" : VPatBinaryV_XM<intrinsic, instruction, /*CarryOut=*/1>;
+}
+
+multiclass VPatBinaryM_V_X<string intrinsic, string instruction>
+{
+  defm "" : VPatBinaryV_V<intrinsic, instruction>;
+  defm "" : VPatBinaryV_X<intrinsic, instruction>;
+}
+
 //===----------------------------------------------------------------------===//
 // Pseudo instructions and patterns.
 //===----------------------------------------------------------------------===//
@@ -694,6 +908,16 @@
 defm PseudoVWADD : VPseudoBinaryW_WV_WX;
 defm PseudoVWSUB : VPseudoBinaryW_WV_WX;

+//===----------------------------------------------------------------------===//
+// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVADC : VPseudoBinaryV_VM_XM_IM;
+defm PseudoVMADC : VPseudoBinaryM_VM_XM_IM;
+defm PseudoVMADC : VPseudoBinaryM_V_X_I;
+
+defm PseudoVSBC : VPseudoBinaryV_VM_XM;
+defm PseudoVMSBC : VPseudoBinaryM_VM_XM;
+defm PseudoVMSBC : VPseudoBinaryM_V_X;

 //===----------------------------------------------------------------------===//
 // Patterns.
@@ -725,4 +949,15 @@
 defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD">;
 defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB">;

+//===----------------------------------------------------------------------===//
+// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VM_XM_IM<"int_riscv_vadc", "PseudoVADC">;
+defm "" : VPatBinaryM_VM_XM_IM<"int_riscv_vmadc_carry_in", "PseudoVMADC">;
+defm "" : VPatBinaryM_V_X_I<"int_riscv_vmadc", "PseudoVMADC">;
+
+defm "" : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
+defm "" : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
+defm "" : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;
+
 } // Predicates = [HasStdExtV]
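For reference, each defm above expands to one pseudo per LMUL, named by concatenation (for example PseudoVADC_VVM_M1 for the carry-in form, or PseudoVMADC_VV_MF8 for the plain carry-out form), and VPatBinaryCarryIn/VPatBinaryMaskOut pick the instruction via inst#"_"#kind#"_"#vlmul.MX. A minimal FileCheck-style sketch of the lowering these patterns are expected to produce for the vsbc.vxm form (illustrative only, written in the style of the tests that follow):

; Hypothetical test sketch, not part of the patch.
declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @sketch_vsbc_vxm(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
entry:
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    i32 %3)
  ret <vscale x 2 x i32> %a
}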
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
@@ -0,0 +1,973 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; 
CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i8.nxv16i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv32i8.nxv32i8( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv32i8.nxv32i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv64i8.nxv64i8( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv64i8.nxv64i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i16.nxv1i16( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i16.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i16.nxv2i16( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i16.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i16.nxv4i16( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i16.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i16.nxv8i16( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i16.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i16.nxv16i16( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i16.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv32i16.nxv32i16( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv32i16.nxv32i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vadc.nxv1i32.nxv1i32( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i32.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i32.nxv2i32( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i32.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i32.nxv4i32( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i32.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i32.nxv8i32( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i32.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i32.nxv16i32( + , + , + , + i32); + +define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i32.nxv16i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i8.i8( + , + i8, + , + i32); + +define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i8.i8( + , + i8, + , + i32); + +define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i8.i8( + , + i8, + , + i32); + +define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i8.i8( + , + i8, + , + i32); + +define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; 
CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i8.i8( + , + i8, + , + i32); + +define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv32i8.i8( + , + i8, + , + i32); + +define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv32i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv64i8.i8( + , + i8, + , + i32); + +define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv64i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i16.i16( + , + i16, + , + i32); + +define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i16.i16( + , + i16, + , + i32); + +define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i16.i16( + , + i16, + , + i32); + +define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i16.i16( + , + i16, + , + i32); + +define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i16.i16( + , + i16, + , + i32); + +define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv32i16.i16( + , + i16, + , + i32); + +define 
@intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv32i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i32.i32( + , + i32, + , + i32); + +define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i32.i32( + , + i32, + , + i32); + +define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i32.i32( + , + i32, + , + i32); + +define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i32.i32( + , + i32, + , + i32); + +define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i32.i32( + , + i32, + , + i32); + +define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i32.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv1i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv2i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv4i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv8i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv16i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv32i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv64i8.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv1i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv2i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv4i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv8i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv16i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv32i16.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv1i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32( %0, %1, i32 %2) nounwind { +entry: 
+; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv2i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv4i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv8i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv16i32.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll @@ -0,0 +1,1189 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vadc.nxv1i8.nxv1i8( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i8.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i8.nxv2i8( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i8.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i8.nxv4i8( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i8.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i8.nxv8i8( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i8.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i8.nxv16i8( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call 
@llvm.riscv.vadc.nxv16i8.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv32i8.nxv32i8( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv32i8.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv64i8.nxv64i8( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv64i8.nxv64i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i16.nxv1i16( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i16.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i16.nxv2i16( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i16.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i16.nxv4i16( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i16.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i16.nxv8i16( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i16.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i16.nxv16i16( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i16.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv32i16.nxv32i16( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv32i16.nxv32i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i32.nxv1i32( + , + , + , + i64); + +define 
@intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i32.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i32.nxv2i32( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i32.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i32.nxv4i32( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i32.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i32.nxv8i32( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i32.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i32.nxv16i32( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i32.nxv16i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i64.nxv1i64( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i64.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i64.nxv2i64( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i64.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i64.nxv4i64( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i64.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i64.nxv8i64( + , + , + , + i64); + +define @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m8,ta,mu +; CHECK: vadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i64.nxv8i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i8.i8( + , + i8, + , + i64); + +define @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i8.i8( + , + i8, + , + i64); + +define @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i8.i8( + , + i8, + , + i64); + +define @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i8.i8( + , + i8, + , + i64); + +define @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i8.i8( + , + i8, + , + i64); + +define @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv32i8.i8( + , + i8, + , + i64); + +define @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv32i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv64i8.i8( + , + i8, + , + i64); + +define @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv64i8.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i16.i16( + , + i16, + , + i64); + +define @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i16.i16( + , + i16, + , + i64); + +define @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i16.i16( + , + i16, + , + i64); + +define @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i16.i16( + , + i16, + , + i64); + +define @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i16.i16( + , + i16, + , + i64); + +define @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv32i16.i16( + , + i16, + , + i64); + +define @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv32i16.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i32.i32( + , + i32, + , + i64); + +define @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i32.i32( + , + i32, + , + i64); + +define @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i32.i32( + , + i32, + , + i64); + +define @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i32.i32( + , + i32, + , + i64); + +define @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i32.i32( + %0, + 
i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv16i32.i32( + , + i32, + , + i64); + +define @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv16i32.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv1i64.i64( + , + i64, + , + i64); + +define @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv1i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv2i64.i64( + , + i64, + , + i64); + +define @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv2i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv4i64.i64( + , + i64, + , + i64); + +define @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv4i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vadc.nxv8i64.i64( + , + i64, + , + i64); + +define @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vadc.nxv8i64.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv1i8.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv2i8.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv4i8.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv8i8.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv16i8.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv32i8.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv64i8.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv1i16.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv2i16.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv4i16.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv8i16.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv16i16.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv32i16.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv1i32.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv2i32.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32( %0, %1, i64 
%2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv4i32.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv8i32.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv16i32.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv1i64.i64( + %0, + i64 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv2i64.i64( + %0, + i64 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv4i64.i64( + %0, + i64 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vadc.nxv8i64.i64( + %0, + i64 9, + %1, + i64 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll @@ -0,0 +1,883 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmadc.nxv1i1.nxv1i8( + , + , + i32); + +define @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv2i1.nxv2i8( + , + , + i32); + +define @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv4i1.nxv4i8( + , + , + i32); + +define @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+  <vscale x 1 x i8>,
+  i8,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+  <vscale x 2 x i8>,
+  i8,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+  <vscale x 4 x i8>,
+  i8,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+  <vscale x 8 x i8>,
+  i8,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+  <vscale x 16 x i8>,
+  i8,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+  <vscale x 32 x i8>,
+  i8,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+  <vscale x 64 x i8>,
+  i8,
+  i32);
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+  <vscale x 1 x i16>,
+  i16,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+  <vscale x 2 x i16>,
+  i16,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+  <vscale x 4 x i16>,
+  i16,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+  <vscale x 8 x i16>,
+  i16,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+  <vscale x 16 x i16>,
+  i16,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+  <vscale x 32 x i16>,
+  i16,
+  i32);
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+  <vscale x 1 x i32>,
+  i32,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+  <vscale x 2 x i32>,
+  i32,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+  <vscale x 4 x i32>,
+  i32,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+  <vscale x 8 x i32>,
+  i32,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+  <vscale x 16 x i32>,
+  i32,
+  i32);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i1.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 64 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
@@ -0,0 +1,1079 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare @llvm.riscv.vmadc.nxv32i1.nxv32i8( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv32i1.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv64i1.nxv64i8( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv64i1.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv1i1.nxv1i16( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv2i1.nxv2i16( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv4i1.nxv4i16( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv8i1.nxv8i16( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv16i1.nxv16i16( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv16i1.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv32i1.nxv32i16( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv32i1.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv1i1.nxv1i32( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vmadc.nxv1i1.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv2i1.nxv2i32( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv4i1.nxv4i32( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv8i1.nxv8i32( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv16i1.nxv16i32( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv16i1.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv1i1.nxv1i64( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv1i1.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv2i1.nxv2i64( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv2i1.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv4i1.nxv4i64( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv4i1.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv8i1.nxv8i64( + , + , + i64); + +define @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmadc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv8i1.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv1i1.i8( + , + i8, + i64); + +define @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, 
{{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv1i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv2i1.i8( + , + i8, + i64); + +define @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv2i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv4i1.i8( + , + i8, + i64); + +define @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv4i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv8i1.i8( + , + i8, + i64); + +define @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv8i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv16i1.i8( + , + i8, + i64); + +define @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv16i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv32i1.i8( + , + i8, + i64); + +define @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv32i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv64i1.i8( + , + i8, + i64); + +define @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv64i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv1i1.i16( + , + i16, + i64); + +define @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv1i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv2i1.i16( + , + i16, + i64); + +define @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv2i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv4i1.i16( + , + i16, + i64); + +define @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call 
@llvm.riscv.vmadc.nxv4i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv8i1.i16( + , + i16, + i64); + +define @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv8i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv16i1.i16( + , + i16, + i64); + +define @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv16i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv32i1.i16( + , + i16, + i64); + +define @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv32i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv1i1.i32( + , + i32, + i64); + +define @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv1i1.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv2i1.i32( + , + i32, + i64); + +define @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv2i1.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv4i1.i32( + , + i32, + i64); + +define @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv4i1.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv8i1.i32( + , + i32, + i64); + +define @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv8i1.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv16i1.i32( + , + i32, + i64); + +define @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv16i1.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv1i1.i64( + , + i64, + i64); + +define @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + 
%a = call @llvm.riscv.vmadc.nxv1i1.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv2i1.i64( + , + i64, + i64); + +define @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv2i1.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv4i1.i64( + , + i64, + i64); + +define @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv4i1.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmadc.nxv8i1.i64( + , + i64, + i64); + +define @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64( %0, i64 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmadc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmadc.nxv8i1.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv1i1.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv2i1.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv4i1.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv8i1.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv16i1.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv32i1.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv64i1.i8( + %0, + i8 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv1i1.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv2i1.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv4i1.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv8i1.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv16i1.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv32i1.i16( + %0, + i16 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv1i1.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv2i1.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv4i1.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv8i1.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv16i1.i32( + %0, + i32 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv1i1.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv2i1.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv4i1.i64( + %0, + i64 9, + i64 %1) + + ret %a +} + +define @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmadc.nxv8i1.i64( + %0, + i64 9, + i64 %1) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll @@ -0,0 +1,973 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: 
vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32( + , + , + , + i32); + +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.i8( + , + i8, + , + i32); + +define 
@intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv32i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv64i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.i16( + , + 
i16, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv32i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.i32( + , + i32, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.i32( + , + i32, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.i32( + , + i32, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.i32( + , + i32, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i32( + %0, + i32 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.i32( + , + i32, + , + i32); + +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32( + %0, + i32 %1, + %2, + i32 
%3) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.i8( + %0, + i8 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16( + %0, + i16 9, + %1, 
+ i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i16( + %0, + i16 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32( + %0, + i32 9, + %1, + i32 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv64.ll @@ -0,0 +1,1189 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu 
+; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv64i1_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.nxv64i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv32i1_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.nxv32i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32( + , + , + , + i64); + 
+define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv16i1_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.nxv16i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i64( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv1i1_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i64( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv2i1_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i64( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv4i1_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i64( + , + , + , + i64); + +define @intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vvm_nxv8i1_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmadc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.nxv8i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} 
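; NOTE: the <vscale x N x Ty> type annotations appear to have been dropped
; from this rendering of the tests; they are implied by each intrinsic's name
; suffix (nxv2i1.i8 = <vscale x 2 x i1> mask result, <vscale x 2 x i8> vector
; operand, i8 scalar). As a reconstruction of the vmadc.vxm test just above
; (not additional test content), with the types spelled out:

declare <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i1> %2,
    i64 %3)
  ret <vscale x 2 x i1> %a
}

; The trailing ", v0" in the vmadc.vxm CHECK line is the carry-in mask, passed
; as the third intrinsic operand; the result is itself a mask register.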
+ +declare @llvm.riscv.vmadc.carry.in.nxv4i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv32i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv64i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16( + %0, + i16 %1, + %2, 
+ i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv32i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv16i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv16i1_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + 
%a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv1i1.i64( + , + i64, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv1i1_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv2i1.i64( + , + i64, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv2i1_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv4i1.i64( + , + i64, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv4i1_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmadc.carry.in.nxv8i1.i64( + , + i64, + , + i64); + +define @intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vxm_nxv8i1_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmadc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i64( + %0, + i64 %1, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, 
{{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv64i1.i8( + %0, + i8 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv32i1.i16( + %0, + i16 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,m1,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv16i1.i32( + %0, + i32 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv1i1.i64( + %0, + i64 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv2i1.i64( + %0, + i64 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv4i1.i64( + %0, + i64 9, + %1, + i64 %2) + + ret %a +} + +define @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0 + %a = call @llvm.riscv.vmadc.carry.in.nxv8i1.i64( + %0, + i64 9, + %1, + i64 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv32.ll @@ -0,0 +1,649 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsbc.nxv1i1.nxv1i8( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.nxv2i8( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.nxv4i8( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.nxv8i8( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.nxv16i8( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv32i1.nxv32i8( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv32i1.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv64i1.nxv64i8( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv64i1.nxv64i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.nxv1i16( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.nxv2i16( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.nxv4i16( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.nxv8i16( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, 
%1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.nxv16i16( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv32i1.nxv32i16( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv32i1.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.nxv1i32( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.nxv2i32( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.nxv4i32( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.nxv8i32( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.nxv16i32( + , + , + i32); + +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.i8( + , + i8, + i32); + +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.i8( + , + i8, + i32); + 
+define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.i8( + , + i8, + i32); + +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.i8( + , + i8, + i32); + +define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.i8( + , + i8, + i32); + +define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv32i1.i8( + , + i8, + i32); + +define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv32i1.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv64i1.i8( + , + i8, + i32); + +define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv64i1.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.i16( + , + i16, + i32); + +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.i16( + , + i16, + i32); + +define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.i16( + , + i16, + i32); + +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.i16( + , + i16, + i32); + +define 
@intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.i16( + , + i16, + i32); + +define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv32i1.i16( + , + i16, + i32); + +define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv32i1.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.i32( + , + i32, + i32); + +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.i32( + , + i32, + i32); + +define @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.i32( + , + i32, + i32); + +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.i32( + , + i32, + i32); + +define @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.i32( + , + i32, + i32); + +define @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32( %0, i32 %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc-rv64.ll @@ -0,0 +1,793 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsbc.nxv1i1.nxv1i8( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8( %0, %1, i64 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.nxv2i8( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.nxv4i8( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.nxv8i8( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.nxv16i8( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv32i1.nxv32i8( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv32i1.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv64i1.nxv64i8( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv64i1_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv64i1.nxv64i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.nxv1i16( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.nxv2i16( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.nxv4i16( + , + , + i64); + +define 
@intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.nxv8i16( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.nxv16i16( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv32i1.nxv32i16( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv32i1_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv32i1.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.nxv1i32( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.nxv2i32( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.nxv4i32( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.nxv8i32( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.nxv16i32( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + 
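; Same convention here: with the implied scalable types restored, the
; vmsbc.vv test just above reads as follows (a reconstruction; nxv16i32 at
; SEW=32 corresponds to the e32,m8 vsetvli the CHECK lines expect):

declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i64);

define <vscale x 16 x i1> @intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vmsbc_vv_nxv16i1_nxv16i32_nxv16i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i64 %2)
  ret <vscale x 16 x i1> %a
}

; Note that plain vmsbc, unlike the vmsbc.borrow.in tests below, takes no v0
; mask operand, so the expected instruction has only three operands.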
+declare @llvm.riscv.vmsbc.nxv1i1.nxv1i64( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv1i1_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.nxv2i64( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv2i1_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.nxv4i64( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv4i1_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.nxv8i64( + , + , + i64); + +define @intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vv_nxv8i1_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmsbc.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.i8( + , + i8, + i64); + +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.i8( + , + i8, + i64); + +define @intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.i8( + , + i8, + i64); + +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.i8( + , + i8, + i64); + +define @intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.i8( + , + i8, + i64); + +define @intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vmsbc.nxv32i1.i8( + , + i8, + i64); + +define @intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv32i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv64i1.i8( + , + i8, + i64); + +define @intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8( %0, i8 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv64i1.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.i16( + , + i16, + i64); + +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv2i1.i16( + , + i16, + i64); + +define @intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv2i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv4i1.i16( + , + i16, + i64); + +define @intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv4i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv8i1.i16( + , + i16, + i64); + +define @intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv8i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv16i1.i16( + , + i16, + i64); + +define @intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv16i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv32i1.i16( + , + i16, + i64); + +define @intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16( %0, i16 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv32i1.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsbc.nxv1i1.i32( + , + i32, + i64); + +define @intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}} + %a = call @llvm.riscv.vmsbc.nxv1i1.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare 
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(<vscale x 2 x i32>, i32, i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i32(<vscale x 2 x i32> %0, i32 %1, i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(<vscale x 4 x i32>, i32, i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i32(<vscale x 4 x i32> %0, i32 %1, i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(<vscale x 8 x i32>, i32, i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i32(<vscale x 8 x i32> %0, i32 %1, i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(<vscale x 16 x i32>, i32, i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv16i1_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.nxv16i1.i32(<vscale x 16 x i32> %0, i32 %1, i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i64(<vscale x 1 x i64>, i64, i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv1i1_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.nxv1i1.i64(<vscale x 1 x i64> %0, i64 %1, i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i64(<vscale x 2 x i64>, i64, i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv2i1_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.nxv2i1.i64(<vscale x 2 x i64> %0, i64 %1, i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i64(<vscale x 4 x i64>, i64, i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv4i1_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.nxv4i1.i64(<vscale x 4 x i64> %0, i64 %1, i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i64(<vscale x 8 x i64>, i64, i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc_vx_nxv8i1_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vmsbc.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.nxv8i1.i64(<vscale x 8 x i64> %0, i64 %1, i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv32.ll
@@ -0,0 +1,721 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32);
+
+define <vscale x 64 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i1> @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3)
+
+  ret <vscale x 64 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1>
@llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32( + , + , + , + i32); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8( + , + i8, + , + i32); + +define 
@intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8( + , + i8, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16( + , + i16, + , + i32); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16( + %0, + i16 %1, + %2, + i32 %3) + + 
ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(<vscale x 1 x i32>, i32, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(<vscale x 2 x i32>, i32, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(<vscale x 4 x i32>, i32, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(<vscale x 8 x i32>, i32, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(<vscale x 16 x i32>, i32, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbc.borrow.in-rv64.ll
@@ -0,0 +1,881 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv64i1_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i1.nxv64i8( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv32i1_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.nxv32i16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32( %0, %1, %2, i64 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv16i1_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.nxv16i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i64( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv1i1_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i64( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv2i1_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i64( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv4i1_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i64( + , + , + , + i64); + +define @intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vvm_nxv8i1_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vmsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.nxv8i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8( + , + i8, + , + i64); + +define 
@intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8( + , + i8, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8( %0, i8 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv64i1_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv64i1.i8( + %0, + i8 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare 
@llvm.riscv.vmsbc.borrow.in.nxv16i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16( + , + i16, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16( %0, i16 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv32i1_nxv32i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv32i1.i16( + %0, + i16 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv1i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv2i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv4i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv8i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32( + , + i32, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32( %0, i32 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv16i1_nxv16i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vmsbc.borrow.in.nxv16i1.i32( + %0, + i32 %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vmsbc.borrow.in.nxv1i1.i64( + , + i64, + , + i64); + +define @intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64( %0, i64 %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv1i1_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a 
= call <vscale x 1 x i1> @llvm.riscv.vmsbc.borrow.in.nxv1i1.i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i64(<vscale x 2 x i64>, i64, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv2i1_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbc.borrow.in.nxv2i1.i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i64(<vscale x 4 x i64>, i64, <vscale x 4 x i1>, i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv4i1_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbc.borrow.in.nxv4i1.i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i64(<vscale x 8 x i64>, i64, <vscale x 8 x i1>, i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbc.borrow.in_vxm_nxv8i1_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vmsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbc.borrow.in.nxv8i1.i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv32.ll
@@ -0,0 +1,721 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3)
nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv16i8.nxv16i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv32i8.nxv32i8( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv32i8.nxv32i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv64i8.nxv64i8( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv64i8.nxv64i8( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv1i16.nxv1i16( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv1i16.nxv1i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv2i16.nxv2i16( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv2i16.nxv2i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv4i16.nxv4i16( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv4i16.nxv4i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv8i16.nxv8i16( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv8i16.nxv8i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv16i16.nxv16i16( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv16i16.nxv16i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv32i16.nxv32i16( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv32i16.nxv32i16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv1i32.nxv1i32( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv1i32.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv2i32.nxv2i32( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv2i32.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv4i32.nxv4i32( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv4i32.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv8i32.nxv8i32( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv8i32.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv16i32.nxv16i32( + , + , + , + i32); + +define @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv16i32.nxv16i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv1i8.i8( + , + i8, + , + i32); + +define @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv1i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv2i8.i8( + , + i8, + , + i32); + +define @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv2i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv4i8.i8( + , + i8, + , + i32); + +define @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv4i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv8i8.i8( + , + i8, + , + i32); + +define @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8( %0, i8 %1, %2, i32 %3) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv8i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv16i8.i8( + , + i8, + , + i32); + +define @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv16i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv32i8.i8( + , + i8, + , + i32); + +define @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv32i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv64i8.i8( + , + i8, + , + i32); + +define @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8( %0, i8 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv64i8.i8( + %0, + i8 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv1i16.i16( + , + i16, + , + i32); + +define @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv1i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv2i16.i16( + , + i16, + , + i32); + +define @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv2i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv4i16.i16( + , + i16, + , + i32); + +define @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv4i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv8i16.i16( + , + i16, + , + i32); + +define @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv8i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vsbc.nxv16i16.i16( + , + i16, + , + i32); + +define @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16( %0, i16 %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0 + %a = call @llvm.riscv.vsbc.nxv16i16.i16( + %0, + i16 %1, + %2, + i32 %3) + + 
ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(<vscale x 32 x i16>, i16, <vscale x 32 x i1>, i32);
+
+define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(<vscale x 1 x i32>, i32, <vscale x 1 x i1>, i32);
+
+define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(<vscale x 2 x i32>, i32, <vscale x 2 x i1>, i32);
+
+define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(<vscale x 4 x i32>, i32, <vscale x 4 x i1>, i32);
+
+define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(<vscale x 8 x i32>, i32, <vscale x 8 x i1>, i32);
+
+define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(<vscale x 16 x i32>, i32, <vscale x 16 x i1>, i32);
+
+define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsbc-rv64.ll
@@ -0,0 +1,881 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i64);
+
+define <vscale x 1 x i8> @intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i64);
+
+define <vscale x 2 x i8> @intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}},
v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vvm_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsbc.vvm {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
+  <vscale x 1 x i8>,
+  i8,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsbc.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
+  <vscale x 2 x i8>,
+  i8,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsbc.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
+  <vscale x 4 x i8>,
+  i8,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsbc.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
+  <vscale x 8 x i8>,
+  i8,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsbc.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
+  <vscale x 16 x i8>,
+  i8,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsbc.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
+  <vscale x 32 x i8>,
+  i8,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsbc.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
+  <vscale x 64 x i8>,
+  i8,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsbc.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 %1,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
+  <vscale x 1 x i16>,
+  i16,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsbc.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
+  <vscale x 2 x i16>,
+  i16,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsbc.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
+  <vscale x 4 x i16>,
+  i16,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsbc.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
+  <vscale x 8 x i16>,
+  i16,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsbc.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
+  <vscale x 16 x i16>,
+  i16,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsbc.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
+  <vscale x 32 x i16>,
+  i16,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsbc.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 %1,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
+  <vscale x 1 x i32>,
+  i32,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsbc.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
+  <vscale x 2 x i32>,
+  i32,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsbc.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
+  <vscale x 4 x i32>,
+  i32,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsbc.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
+  <vscale x 8 x i32>,
+  i32,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsbc.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
+  <vscale x 16 x i32>,
+  i32,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsbc.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
+  <vscale x 1 x i64>,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsbc.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsbc.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsbc.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
+  <vscale x 8 x i64>,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsbc_vxm_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsbc.vxm {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsbc.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}