diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -389,6 +389,21 @@ : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>], [IntrNoMem]>, RISCVVIntrinsic; + // For mask unary operations with mask type in/out without mask + // Output: (mask type output) + // Input: (mask type vector_in, vl) + class RISCVMaskUnaryMOutNoMask + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic; + // For mask unary operations with mask type in/out with mask + // Output: (mask type output) + // Input: (mask type maskedoff, mask type vector_in, mask, vl) + class RISCVMaskUnaryMOutMask + : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, + LLVMMatchType<0>, llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic; multiclass RISCVUSLoad { def "int_riscv_" # NAME : RISCVUSLoad; @@ -469,6 +484,10 @@ def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask; def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask; } + multiclass RISCVMaskUnaryMOut { + def "int_riscv_" # NAME : RISCVMaskUnaryMOutNoMask; + def "int_riscv_" # NAME # "_mask" : RISCVMaskUnaryMOutMask; + } defm vle : RISCVUSLoad; defm vleff : RISCVUSLoad; @@ -685,5 +704,35 @@ defm vpopc : RISCVMaskUnarySOut; defm vfirst : RISCVMaskUnarySOut; + defm vmsbf : RISCVMaskUnaryMOut; + defm vmsof : RISCVMaskUnaryMOut; + defm vmsif : RISCVMaskUnaryMOut; + + // Output: (vector) + // Input: (mask type input, vl) + def int_riscv_viota : Intrinsic<[llvm_anyvector_ty], + [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic; + // Output: (vector) + // Input: (maskedoff, mask type vector_in, mask, vl) + def int_riscv_viota_mask : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic; + // Output: (vector) + // Input: (vl) + def int_riscv_vid : Intrinsic<[llvm_anyvector_ty], + [llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic; + // Output: (vector) + // Input: (maskedoff, mask, vl) + def int_riscv_vid_mask : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + llvm_anyint_ty], + [IntrNoMem]>, RISCVVIntrinsic; } // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -507,14 +507,46 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VMaskPseudoUnarySOutNoMask: - Pseudo<(outs GPR:$rd), - (ins VR:$rs1, GPR:$vl, ixlenimm:$sew), []>, +class VPseudoNullaryNoMask: + Pseudo<(outs RegClass:$rd), + (ins GPR:$vl, ixlenimm:$sew), + []>, RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; + let Uses = [VL, VTYPE]; + let VLIndex = 1; + let SEWIndex = 2; + let HasDummyMask = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoNullaryMask: + Pseudo<(outs GetVRegNoV0.R:$rd), + (ins GetVRegNoV0.R:$merge, VMaskOp:$vm, GPR:$vl, + ixlenimm:$sew), []>, RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; + let Constraints ="$rd = $merge"; + let Uses = [VL, VTYPE]; + let VLIndex = 3; + let SEWIndex = 4; + let MergeOpIndex 
= 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +class VPseudoUnaryNoMask : + Pseudo<(outs RetClass:$rd), + (ins OpClass:$rs2, GPR:$vl, ixlenimm:$sew), []>, RISCVVPseudo { let mayLoad = 0; let mayStore = 0; let hasSideEffects = 0; let usesCustomInserter = 1; + let Constraints = Constraint; let Uses = [VL, VTYPE]; let VLIndex = 2; let SEWIndex = 3; @@ -522,7 +554,25 @@ let BaseInstr = !cast(PseudoToVInst.VInst); } -class VMaskPseudoUnarySOutMask: +class VPseudoUnaryMask : + Pseudo<(outs GetVRegNoV0.R:$rd), + (ins GetVRegNoV0.R:$merge, OpClass:$rs2, + VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; + let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret; + let Uses = [VL, VTYPE]; + let VLIndex = 4; + let SEWIndex = 5; + let MergeOpIndex = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +// mask unary operation without maskedoff +class VPseudoMaskUnarySOutMask: Pseudo<(outs GPR:$rd), (ins VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>, RISCVVPseudo { @@ -533,7 +583,23 @@ let Uses = [VL, VTYPE]; let VLIndex = 3; let SEWIndex = 4; - let HasDummyMask = 1; + let BaseInstr = !cast(PseudoToVInst.VInst); +} + +// Masked mask operation have no $rd=$merge constraints +class VPseudoUnaryMOutMask: + Pseudo<(outs VR:$rd), + (ins VR:$merge, VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>, + RISCVVPseudo { + let mayLoad = 0; + let mayStore = 0; + let hasSideEffects = 0; + let usesCustomInserter = 1; + let Constraints = "$rd = $merge"; + let Uses = [VL, VTYPE]; + let VLIndex = 4; + let SEWIndex = 5; + let MergeOpIndex = 1; let BaseInstr = !cast(PseudoToVInst.VInst); } @@ -725,12 +791,41 @@ } } -multiclass VMaskPseudoUnarySOut { +multiclass VPseudoUnaryS_M { foreach mti = AllMasks in { let VLMul = mti.LMul.value in { - def "_M_" # mti.BX : VMaskPseudoUnarySOutNoMask; - def "_M_" # mti.BX # "_MASK" : VMaskPseudoUnarySOutMask; + def "_M_" # mti.BX : VPseudoUnaryNoMask; + def "_M_" # mti.BX # "_MASK" : VPseudoMaskUnarySOutMask; + } + } +} + +multiclass VPseudoUnaryM_M { + foreach mti = AllMasks in + { + let VLMul = mti.LMul.value in { + def "_M_" # mti.BX : VPseudoUnaryNoMask; + def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask; + } + } +} + +multiclass VPseudoMaskNullaryV { + foreach m = MxList.m in { + let VLMul = m.value in { + def "_V_" # m.MX : VPseudoNullaryNoMask; + def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask; + } + } +} + +multiclass VPseudoUnaryV_M { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxList.m in { + let VLMul = m.value in { + def "_" # m.MX : VPseudoUnaryNoMask; + def "_" # m.MX # "_MASK" : VPseudoUnaryMask; } } } @@ -1044,6 +1139,65 @@ //===----------------------------------------------------------------------===// // Helpers to define the intrinsic patterns. 
//===----------------------------------------------------------------------===// + +class VPatUnaryNoMask : + Pat<(result_type (!cast(intrinsic_name) + (op2_type op2_reg_class:$rs2), + (XLenVT GPR:$vl))), + (!cast(inst#"_"#kind#"_"#vlmul.MX) + (op2_type op2_reg_class:$rs2), + (NoX0 GPR:$vl), sew)>; + +class VPatUnaryMask : + Pat<(result_type (!cast(intrinsic_name#"_mask") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), + (XLenVT GPR:$vl))), + (!cast(inst#"_"#kind#"_"#vlmul.MX#"_MASK") + (result_type result_reg_class:$merge), + (op2_type op2_reg_class:$rs2), + (mask_type V0), (NoX0 GPR:$vl), sew)>; + +class VPatMaskUnaryNoMask : + Pat<(mti.Mask (!cast(intrinsic_name) + (mti.Mask VR:$rs2), + (XLenVT GPR:$vl))), + (!cast(inst#"_M_"#mti.BX) + (mti.Mask VR:$rs2), + (NoX0 GPR:$vl), mti.SEW)>; + +class VPatMaskUnaryMask : + Pat<(mti.Mask (!cast(intrinsic_name#"_mask") + (mti.Mask VR:$merge), + (mti.Mask VR:$rs2), + (mti.Mask V0), + (XLenVT GPR:$vl))), + (!cast(inst#"_M_"#mti.BX#"_MASK") + (mti.Mask VR:$merge), + (mti.Mask VR:$rs2), + (mti.Mask V0), (NoX0 GPR:$vl), mti.SEW)>; + class VPatBinaryNoMask; } -multiclass VPatMaskUnarySOut { foreach mti = AllMasks in { @@ -1274,6 +1428,40 @@ } } +multiclass VPatUnaryM_M +{ + foreach mti = AllMasks in { + def : VPatMaskUnaryNoMask; + def : VPatMaskUnaryMask; + } +} + +multiclass VPatUnaryV_M +{ + foreach vti = AllIntegerVectors in { + def : VPatUnaryNoMask; + def : VPatUnaryMask; + } +} + +multiclass VPatNullaryV +{ + foreach vti = AllIntegerVectors in { + def : Pat<(vti.Vector (!cast(intrinsic) + (XLenVT GPR:$vl))), + (!cast(instruction#"_V_" # vti.LMul.MX) + (NoX0 GPR:$vl), vti.SEW)>; + def : Pat<(vti.Vector (!cast(intrinsic # "_mask") + (vti.Vector vti.RegClass:$merge), + (vti.Mask V0), (XLenVT GPR:$vl))), + (!cast(instruction#"_V_" # vti.LMul.MX # "_MASK") + vti.RegClass:$merge, (vti.Mask V0), + (NoX0 GPR:$vl), vti.SEW)>; + } +} multiclass VPatBinary; - defm "" : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">; - defm "" : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">; - defm "" : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">; - defm "" : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">; - defm "" : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">; - defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">; - defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">; -} // Predicates = [HasStdExtV] +defm "" : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">; +defm "" : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">; +defm "" : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">; +defm "" : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">; +defm "" : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">; +defm "" : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">; +defm "" : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">; +defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">; //===----------------------------------------------------------------------===// // 16.2. Vector mask population count vpopc //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { - defm "" : VPatMaskUnarySOut<"int_riscv_vpopc", "PseudoVPOPC">; -} // Predicates = [HasStdExtV] +defm "" : VPatUnaryS_M<"int_riscv_vpopc", "PseudoVPOPC">; //===----------------------------------------------------------------------===// // 16.3. 
vfirst find-first-set mask bit //===----------------------------------------------------------------------===// -let Predicates = [HasStdExtV] in { - defm "" : VPatMaskUnarySOut<"int_riscv_vfirst", "PseudoVFIRST">; +defm "" : VPatUnaryS_M<"int_riscv_vfirst", "PseudoVFIRST">; + +//===----------------------------------------------------------------------===// +// 16.4. vmsbf.m set-before-first mask bit +//===----------------------------------------------------------------------===// +defm "" : VPatUnaryM_M<"int_riscv_vmsbf", "PseudoVMSBF">; + +//===----------------------------------------------------------------------===// +// 16.5. vmsif.m set-including-first mask bit +//===----------------------------------------------------------------------===// +defm "" : VPatUnaryM_M<"int_riscv_vmsif", "PseudoVMSIF">; + +//===----------------------------------------------------------------------===// +// 16.6. vmsof.m set-only-first mask bit +//===----------------------------------------------------------------------===// +defm "" : VPatUnaryM_M<"int_riscv_vmsof", "PseudoVMSOF">; + +//===----------------------------------------------------------------------===// +// 16.8. Vector Iota Instruction +//===----------------------------------------------------------------------===// +defm "" : VPatUnaryV_M<"int_riscv_viota", "PseudoVIOTA">; + +//===----------------------------------------------------------------------===// +// 16.9. Vector Element Index Instruction +//===----------------------------------------------------------------------===// +defm "" : VPatNullaryV<"int_riscv_vid", "PseudoVID">; + } // Predicates = [HasStdExtV] //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv32.ll @@ -0,0 +1,545 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vid.nxv1i8( + i32); + +define @intrinsic_vid_v_nxv1i8(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv1i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv1i8( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv1i8( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv1i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv2i8( + i32); + +define @intrinsic_vid_v_nxv2i8(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv2i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv2i8( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv2i8( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv2i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv4i8( + i32); + +define @intrinsic_vid_v_nxv4i8(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv4i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv4i8( + i32 %0) + + ret %a +} + +declare 
@llvm.riscv.vid.mask.nxv4i8( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv4i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv8i8( + i32); + +define @intrinsic_vid_v_nxv8i8(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv8i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv8i8( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv8i8( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv8i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv16i8( + i32); + +define @intrinsic_vid_v_nxv16i8(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv16i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv16i8( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv16i8( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv16i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv32i8( + i32); + +define @intrinsic_vid_v_nxv32i8(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv32i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv32i8( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv32i8( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv32i8( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv1i16( + i32); + +define @intrinsic_vid_v_nxv1i16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv1i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv1i16( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv1i16( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv1i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv2i16( + i32); + +define @intrinsic_vid_v_nxv2i16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv2i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv2i16( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv2i16( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv2i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv4i16( + i32); + +define @intrinsic_vid_v_nxv4i16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv4i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; 
CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv4i16( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv4i16( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv4i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv8i16( + i32); + +define @intrinsic_vid_v_nxv8i16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv8i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv8i16( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv8i16( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv8i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv16i16( + i32); + +define @intrinsic_vid_v_nxv16i16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv16i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv16i16( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv16i16( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv16i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv32i16( + i32); + +define @intrinsic_vid_v_nxv32i16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv32i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv32i16( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv32i16( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv32i16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv32i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv1i32( + i32); + +define @intrinsic_vid_v_nxv1i32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv1i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv1i32( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv1i32( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv1i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv2i32( + i32); + +define @intrinsic_vid_v_nxv2i32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv2i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv2i32( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv2i32( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv2i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv4i32( + i32); + +define 
@intrinsic_vid_v_nxv4i32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv4i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv4i32( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv4i32( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv4i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv8i32( + i32); + +define @intrinsic_vid_v_nxv8i32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv8i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv8i32( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv8i32( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv8i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv16i32( + i32); + +define @intrinsic_vid_v_nxv16i32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv16i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv16i32( + i32 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv16i32( + , + , + i32); + +define @intrinsic_vid_mask_v_nxv16i32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv16i32( + %0, + %1, + i32 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vid-rv64.ll @@ -0,0 +1,673 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vid.nxv1i8( + i64); + +define @intrinsic_vid_v_nxv1i8(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv1i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv1i8( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv1i8( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv1i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv2i8( + i64); + +define @intrinsic_vid_v_nxv2i8(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv2i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv2i8( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv2i8( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv2i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv4i8( + i64); + +define @intrinsic_vid_v_nxv4i8(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv4i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call 
@llvm.riscv.vid.nxv4i8( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv4i8( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv4i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i8 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv8i8( + i64); + +define @intrinsic_vid_v_nxv8i8(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv8i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv8i8( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv8i8( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv8i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i8 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv16i8( + i64); + +define @intrinsic_vid_v_nxv16i8(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv16i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv16i8( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv16i8( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv16i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i8 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv32i8( + i64); + +define @intrinsic_vid_v_nxv32i8(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv32i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv32i8( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv32i8( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv32i8( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i8 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv1i16( + i64); + +define @intrinsic_vid_v_nxv1i16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv1i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv1i16( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv1i16( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv1i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv2i16( + i64); + +define @intrinsic_vid_v_nxv2i16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv2i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv2i16( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv2i16( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv2i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv4i16( + i64); + +define @intrinsic_vid_v_nxv4i16(i64 %0) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vid_v_nxv4i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv4i16( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv4i16( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv4i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i16 +; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv8i16( + i64); + +define @intrinsic_vid_v_nxv8i16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv8i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv8i16( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv8i16( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv8i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i16 +; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv16i16( + i64); + +define @intrinsic_vid_v_nxv16i16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv16i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv16i16( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv16i16( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv16i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i16 +; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv32i16( + i64); + +define @intrinsic_vid_v_nxv32i16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv32i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv32i16( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv32i16( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv32i16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv32i16 +; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv32i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv1i32( + i64); + +define @intrinsic_vid_v_nxv1i32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv1i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv1i32( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv1i32( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv1i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv2i32( + i64); + +define @intrinsic_vid_v_nxv2i32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv2i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv2i32( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv2i32( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv2i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i32 +; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv2i32( + %0, + %1, + i64 %2) + + 
ret %a +} + +declare @llvm.riscv.vid.nxv4i32( + i64); + +define @intrinsic_vid_v_nxv4i32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv4i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv4i32( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv4i32( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv4i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i32 +; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv8i32( + i64); + +define @intrinsic_vid_v_nxv8i32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv8i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv8i32( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv8i32( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv8i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i32 +; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv16i32( + i64); + +define @intrinsic_vid_v_nxv16i32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv16i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv16i32( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv16i32( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv16i32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv16i32 +; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv16i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv1i64( + i64); + +define @intrinsic_vid_v_nxv1i64(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv1i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv1i64( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv1i64( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv1i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i64 +; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv2i64( + i64); + +define @intrinsic_vid_v_nxv2i64(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv2i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv2i64( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv2i64( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv2i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv2i64 +; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv4i64( + i64); + +define @intrinsic_vid_v_nxv4i64(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv4i64 +; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv4i64( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv4i64( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv4i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv4i64 +; CHECK: vsetvli {{.*}}, 
a0, e64,m4,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vid.nxv8i64( + i64); + +define @intrinsic_vid_v_nxv8i64(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_v_nxv8i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}} + %a = call @llvm.riscv.vid.nxv8i64( + i64 %0) + + ret %a +} + +declare @llvm.riscv.vid.mask.nxv8i64( + , + , + i64); + +define @intrinsic_vid_mask_v_nxv8i64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vid_mask_v_nxv8i64 +; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu +; CHECK: vid.v {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vid.mask.nxv8i64( + %0, + %1, + i64 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv32.ll @@ -0,0 +1,722 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.viota.nxv1i8( + , + i32); + +define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv1i8( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv1i8( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv1i8( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv2i8( + , + i32); + +define @intrinsic_viota_m_nxv2i8_nxv2i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv2i8( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv2i8( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv2i8( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv4i8( + , + i32); + +define @intrinsic_viota_m_nxv4i8_nxv4i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv4i8( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv4i8( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv4i8( + %0, + %1, + %1, + i32 %2) + + ret %a +} + 
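The scalable-vector type arguments in these test listings — everything that originally sat inside angle brackets, i.e. the <vscale x N x iM> result types and <vscale x N x i1> mask types — were lost in this rendering, so the declares and calls above and below appear typeless. A minimal sketch of how one unmasked/masked viota pair presumably reads with the types written back in, matching the int_riscv_viota / int_riscv_viota_mask signatures defined in IntrinsicsRISCV.td above; the function name @viota_example and the concrete nxv8i8 instance are illustrative only:

  declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(<vscale x 8 x i1>, i32)
  declare <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
    <vscale x 8 x i8>,   ; maskedoff
    <vscale x 8 x i1>,   ; mask type vector_in
    <vscale x 8 x i1>,   ; mask
    i32)                 ; vl

  define <vscale x 8 x i8> @viota_example(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i1> %m, i32 %vl) nounwind {
  entry:
    ; element i of the result counts the set mask bits among elements 0..i-1;
    ; like the tests here, the same mask %m is passed as both vector_in and the v0.t mask operand
    %a = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8(
      <vscale x 8 x i8> %maskedoff,
      <vscale x 8 x i1> %m,
      <vscale x 8 x i1> %m,
      i32 %vl)
    ret <vscale x 8 x i8> %a
  }

Under the vsetvli the nxv8i8 cases expect (e8,m1,ta,mu), this should select to the "viota.m v16, v0, v0.t" form that the CHECK-NEXT lines in these tests look for.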
+declare @llvm.riscv.viota.nxv8i8( + , + i32); + +define @intrinsic_viota_m_nxv8i8_nxv8i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv8i8( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv8i8( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv8i8( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv16i8( + , + i32); + +define @intrinsic_viota_m_nxv16i8_nxv16i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv16i8( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv16i8( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv16i8( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv32i8( + , + i32); + +define @intrinsic_viota_m_nxv32i8_nxv32i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv32i8( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv32i8( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv32i8( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv64i8( + , + i32); + +define @intrinsic_viota_m_nxv64i8_nxv64i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv64i8( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv64i8( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv64i8( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv1i16( + , + i32); + +define @intrinsic_viota_m_nxv1i16_nxv1i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = 
call @llvm.riscv.viota.nxv1i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv1i16( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv1i16( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv2i16( + , + i32); + +define @intrinsic_viota_m_nxv2i16_nxv2i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv2i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv2i16( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv2i16( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv4i16( + , + i32); + +define @intrinsic_viota_m_nxv4i16_nxv4i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv4i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv4i16( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv4i16( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv8i16( + , + i32); + +define @intrinsic_viota_m_nxv8i16_nxv8i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv8i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv8i16( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv8i16( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv16i16( + , + i32); + +define @intrinsic_viota_m_nxv16i16_nxv16i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv16i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv16i16( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv16i16( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv32i16( + , + i32); + +define @intrinsic_viota_m_nxv32i16_nxv32i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv32i16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv32i16( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv32i16( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv1i32( + , + i32); + +define @intrinsic_viota_m_nxv1i32_nxv1i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv1i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv1i32( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv1i32( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv2i32( + , + i32); + +define @intrinsic_viota_m_nxv2i32_nxv2i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv2i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv2i32( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv2i32( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv4i32( + , + i32); + +define @intrinsic_viota_m_nxv4i32_nxv4i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv4i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv4i32( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv4i32( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv8i32( + , + i32); + +define @intrinsic_viota_m_nxv8i32_nxv8i1( %0, i32 %1) 
nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv8i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv8i32( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv8i32( + %0, + %1, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv16i32( + , + i32); + +define @intrinsic_viota_m_nxv16i32_nxv16i1( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv16i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv16i32( + , + , + , + i32); + +define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv16i32( + %0, + %1, + %1, + i32 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/viota-rv64.ll @@ -0,0 +1,882 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.viota.nxv1i8( + , + i64); + +define @intrinsic_viota_m_nxv1i8_nxv1i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv1i8( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv1i8( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv1i8_nxv1i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv1i8( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv2i8( + , + i64); + +define @intrinsic_viota_m_nxv2i8_nxv2i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv2i8_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv2i8( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv2i8( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv2i8_nxv2i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i8_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv2i8( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.viota.nxv4i8( + , + i64); + +define @intrinsic_viota_m_nxv4i8_nxv4i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv4i8_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv4i8( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv4i8( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv4i8_nxv4i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i8_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv4i8( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv8i8( + , + i64); + +define @intrinsic_viota_m_nxv8i8_nxv8i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv8i8_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv8i8( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv8i8( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv8i8_nxv8i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i8_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv8i8( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv16i8( + , + i64); + +define @intrinsic_viota_m_nxv16i8_nxv16i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv16i8_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv16i8( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv16i8( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv16i8_nxv16i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i8_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv16i8( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv32i8( + , + i64); + +define @intrinsic_viota_m_nxv32i8_nxv32i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv32i8_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv32i8( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv32i8( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv32i8_nxv32i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i8_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv32i8( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv64i8( + , + i64); + +define @intrinsic_viota_m_nxv64i8_nxv64i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv64i8_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.viota.nxv64i8( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv64i8( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv64i8_nxv64i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv64i8_nxv64i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv64i8( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv1i16( + , + i64); + +define @intrinsic_viota_m_nxv1i16_nxv1i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv1i16_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv1i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv1i16( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv1i16_nxv1i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i16_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv1i16( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv2i16( + , + i64); + +define @intrinsic_viota_m_nxv2i16_nxv2i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv2i16_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv2i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv2i16( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv2i16_nxv2i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i16_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv2i16( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv4i16( + , + i64); + +define @intrinsic_viota_m_nxv4i16_nxv4i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv4i16_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv4i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv4i16( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv4i16_nxv4i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i16_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv4i16( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv8i16( + , + i64); + +define @intrinsic_viota_m_nxv8i16_nxv8i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv8i16_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv8i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv8i16( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv8i16_nxv8i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i16_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, 
a0, e16,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv8i16( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv16i16( + , + i64); + +define @intrinsic_viota_m_nxv16i16_nxv16i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv16i16_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv16i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv16i16( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv16i16_nxv16i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i16_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv16i16( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv32i16( + , + i64); + +define @intrinsic_viota_m_nxv32i16_nxv32i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv32i16_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv32i16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv32i16( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv32i16_nxv32i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv32i16_nxv32i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv32i16( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv1i32( + , + i64); + +define @intrinsic_viota_m_nxv1i32_nxv1i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv1i32_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv1i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv1i32( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv1i32_nxv1i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i32_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv1i32( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv2i32( + , + i64); + +define @intrinsic_viota_m_nxv2i32_nxv2i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv2i32_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv2i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv2i32( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv2i32_nxv2i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i32_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv2i32( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv4i32( + , + i64); + +define @intrinsic_viota_m_nxv4i32_nxv4i1( %0, i64 %1) 
nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv4i32_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv4i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv4i32( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv4i32_nxv4i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i32_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv4i32( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv8i32( + , + i64); + +define @intrinsic_viota_m_nxv8i32_nxv8i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv8i32_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv8i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv8i32( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv8i32_nxv8i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i32_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv8i32( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv16i32( + , + i64); + +define @intrinsic_viota_m_nxv16i32_nxv16i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv16i32_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv16i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv16i32( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv16i32_nxv16i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv16i32_nxv16i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv16i32( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv1i64( + , + i64); + +define @intrinsic_viota_m_nxv1i64_nxv1i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv1i64_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv1i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv1i64( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv1i64_nxv1i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i64_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv1i64( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv2i64( + , + i64); + +define @intrinsic_viota_m_nxv2i64_nxv2i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv2i64_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv2i64( + %0, + i64 %1) + + ret %a +} + +declare 
@llvm.riscv.viota.mask.nxv2i64( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv2i64_nxv2i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv2i64_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv2i64( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv4i64( + , + i64); + +define @intrinsic_viota_m_nxv4i64_nxv4i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv4i64_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv4i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv4i64( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv4i64_nxv4i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv4i64_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv4i64( + %0, + %1, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.viota.nxv8i64( + , + i64); + +define @intrinsic_viota_m_nxv8i64_nxv8i1( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_viota_m_nxv8i64_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.nxv8i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.viota.mask.nxv8i64( + , + , + , + i64); + +define @intrinsic_viota_mask_m_nxv8i64_nxv8i1( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_viota_mask_m_nxv8i64_nxv8i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu +; CHECK-NEXT: viota.m v16, v0, v0.t +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.viota.mask.nxv8i64( + %0, + %1, + %1, + i64 %2) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv32.ll @@ -0,0 +1,239 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsbf.nxv1i1( + , + i32); + +define @intrinsic_vmsbf_m_nxv1i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv1i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv1i1( + , + , + , + i32); + +define @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv1i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsbf.nxv2i1( + , + i32); + +define @intrinsic_vmsbf_m_nxv2i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv2i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv2i1( + , + , + , + i32); + +define @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1 +; 
CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv2i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsbf.nxv4i1( + , + i32); + +define @intrinsic_vmsbf_m_nxv4i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv4i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv4i1( + , + , + , + i32); + +define @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv4i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsbf.nxv8i1( + , + i32); + +define @intrinsic_vmsbf_m_nxv8i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv8i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv8i1( + , + , + , + i32); + +define @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv8i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsbf.nxv16i1( + , + i32); + +define @intrinsic_vmsbf_m_nxv16i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv16i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv16i1( + , + , + , + i32); + +define @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv16i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsbf.nxv32i1( + , + i32); + +define @intrinsic_vmsbf_m_nxv32i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv32i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv32i1( + , + , + , + i32); + +define @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv32i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsbf.nxv64i1( + , + i32); + +define @intrinsic_vmsbf_m_nxv64i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv64i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv64i1( + , + , + , + i32); + +define @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmsbf.mask.nxv64i1(
+    <vscale x 64 x i1> %0,
+    <vscale x 64 x i1> %1,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+  ret <vscale x 64 x i1> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf-rv64.ll
@@ -0,0 +1,239 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbf_m_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
+    <vscale x 1 x i1> %0,
+    i64 %1)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf8,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i1> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbf_m_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
+    <vscale x 2 x i1> %0,
+    i64 %1)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf4,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i1> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbf_m_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
+    <vscale x 4 x i1> %0,
+    i64 %1)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1
+; CHECK:       vsetvli {{.*}}, a0, e8,mf2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i1> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbf_m_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
+    <vscale x 8 x i1> %0,
+    i64 %1)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m1,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i1> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbf_m_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsbf_m_nxv16i1
+; CHECK:       vsetvli {{.*}}, a0, e8,m2,ta,mu
+; CHECK:       vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
+    <vscale x 16 x i1> %0,
+    i64 %1)
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, 
i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv16i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsbf.nxv32i1( + , + i64); + +define @intrinsic_vmsbf_m_nxv32i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv32i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv32i1( + , + , + , + i64); + +define @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv32i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsbf.nxv64i1( + , + i64); + +define @intrinsic_vmsbf_m_nxv64i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsbf.nxv64i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsbf.mask.nxv64i1( + , + , + , + i64); + +define @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsbf.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsbf.mask.nxv64i1( + %0, + %1, + %2, + i64 %3) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv32.ll @@ -0,0 +1,239 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsif.nxv1i1( + , + i32); + +define @intrinsic_vmsif_m_nxv1i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv1i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv1i1( + , + , + , + i32); + +define @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv1i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv2i1( + , + i32); + +define @intrinsic_vmsif_m_nxv2i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv2i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv2i1( + , + , + , + i32); + +define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv2i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv4i1( + , + i32); + +define @intrinsic_vmsif_m_nxv4i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + 
%a = call @llvm.riscv.vmsif.nxv4i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv4i1( + , + , + , + i32); + +define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv4i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv8i1( + , + i32); + +define @intrinsic_vmsif_m_nxv8i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv8i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv8i1( + , + , + , + i32); + +define @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv8i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv16i1( + , + i32); + +define @intrinsic_vmsif_m_nxv16i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv16i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv16i1( + , + , + , + i32); + +define @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv16i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv32i1( + , + i32); + +define @intrinsic_vmsif_m_nxv32i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv32i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv32i1( + , + , + , + i32); + +define @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv32i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv64i1( + , + i32); + +define @intrinsic_vmsif_m_nxv64i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv64i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv64i1( + , + , + , + i32); + +define @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv64i1( + %0, + %1, + %2, + i32 %3) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsif-rv64.ll @@ -0,0 +1,239 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsif.nxv1i1( + , + i64); + 
+define @intrinsic_vmsif_m_nxv1i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv1i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv1i1( + , + , + , + i64); + +define @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv1i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv2i1( + , + i64); + +define @intrinsic_vmsif_m_nxv2i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv2i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv2i1( + , + , + , + i64); + +define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv2i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv4i1( + , + i64); + +define @intrinsic_vmsif_m_nxv4i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv4i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv4i1( + , + , + , + i64); + +define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv4i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv8i1( + , + i64); + +define @intrinsic_vmsif_m_nxv8i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv8i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv8i1( + , + , + , + i64); + +define @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv8i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv16i1( + , + i64); + +define @intrinsic_vmsif_m_nxv16i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv16i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv16i1( + , + , + , + i64); + +define @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv16i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv32i1( + , + i64); + +define @intrinsic_vmsif_m_nxv32i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, 
e8,m4,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv32i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv32i1( + , + , + , + i64); + +define @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv32i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsif.nxv64i1( + , + i64); + +define @intrinsic_vmsif_m_nxv64i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsif.nxv64i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsif.mask.nxv64i1( + , + , + , + i64); + +define @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsif.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsif.mask.nxv64i1( + %0, + %1, + %2, + i64 %3) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv32.ll @@ -0,0 +1,239 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsof.nxv1i1( + , + i32); + +define @intrinsic_vmsof_m_nxv1i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv1i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv1i1( + , + , + , + i32); + +define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv1i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv2i1( + , + i32); + +define @intrinsic_vmsof_m_nxv2i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv2i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv2i1( + , + , + , + i32); + +define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv2i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv4i1( + , + i32); + +define @intrinsic_vmsof_m_nxv4i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv4i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv4i1( + , + , + , + i32); + +define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv4i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + 
+declare @llvm.riscv.vmsof.nxv8i1( + , + i32); + +define @intrinsic_vmsof_m_nxv8i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv8i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv8i1( + , + , + , + i32); + +define @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv8i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv16i1( + , + i32); + +define @intrinsic_vmsof_m_nxv16i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv16i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv16i1( + , + , + , + i32); + +define @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv16i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv32i1( + , + i32); + +define @intrinsic_vmsof_m_nxv32i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv32i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv32i1( + , + , + , + i32); + +define @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv32i1( + %0, + %1, + %2, + i32 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv64i1( + , + i32); + +define @intrinsic_vmsof_m_nxv64i1( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv64i1( + %0, + i32 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv64i1( + , + , + , + i32); + +define @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv64i1( + %0, + %1, + %2, + i32 %3) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsof-rv64.ll @@ -0,0 +1,239 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsof.nxv1i1( + , + i64); + +define @intrinsic_vmsof_m_nxv1i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv1i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv1i1( + , + , + , + i64); + +define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmsof_mask_m_nxv1i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv1i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv2i1( + , + i64); + +define @intrinsic_vmsof_m_nxv2i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv2i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv2i1( + , + , + , + i64); + +define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv2i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv4i1( + , + i64); + +define @intrinsic_vmsof_m_nxv4i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv4i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv4i1( + , + , + , + i64); + +define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1 +; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv4i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv8i1( + , + i64); + +define @intrinsic_vmsof_m_nxv8i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv8i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv8i1( + , + , + , + i64); + +define @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1 +; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv8i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv16i1( + , + i64); + +define @intrinsic_vmsof_m_nxv16i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv16i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv16i1( + , + , + , + i64); + +define @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1 +; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv16i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv32i1( + , + i64); + +define @intrinsic_vmsof_m_nxv32i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv32i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv32i1( + , + , + , + i64); + +define @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1 +; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vmsof.mask.nxv32i1( + %0, + %1, + %2, + i64 %3) + ret %a +} + +declare @llvm.riscv.vmsof.nxv64i1( + , + i64); + +define @intrinsic_vmsof_m_nxv64i1( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vmsof.nxv64i1( + %0, + i64 %1) + ret %a +} + +declare @llvm.riscv.vmsof.mask.nxv64i1( + , + , + , + i64); + +define @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1 +; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu +; CHECK: vmsof.m {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vmsof.mask.nxv64i1( + %0, + %1, + %2, + i64 %3) + ret %a +}