diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def --- a/clang/include/clang/Basic/BuiltinsRISCV.def +++ b/clang/include/clang/Basic/BuiltinsRISCV.def @@ -122,5 +122,382 @@ TARGET_BUILTIN(__builtin_riscv_sm3p0, "LiLi", "nc", "zksh") TARGET_BUILTIN(__builtin_riscv_sm3p1, "LiLi", "nc", "zksh") +// P extension + +// add8 +TARGET_BUILTIN(__rv_add8, "ULiULiULi", "", "experimental-p") +// add16 +TARGET_BUILTIN(__rv_add16, "ULiULiULi", "", "experimental-p") +// ave +TARGET_BUILTIN(__rv_ave, "SLiSLiSLi", "", "experimental-p") +// bitrev +TARGET_BUILTIN(__rv_bitrev, "ULiULiULi", "", "experimental-p") +// bpick +TARGET_BUILTIN(__rv_bpick, "ULiULiULiULi", "", "experimental-p") +// clrs8 +TARGET_BUILTIN(__rv_clrs8, "ULiULi", "", "experimental-p") +// clrs16 +TARGET_BUILTIN(__rv_clrs16, "ULiULi", "", "experimental-p") +// clrs32 +TARGET_BUILTIN(__rv_clrs32, "ULiULi", "", "experimental-p") +// clz8 +TARGET_BUILTIN(__rv_clz8, "ULiULi", "", "experimental-p") +// clz16 +TARGET_BUILTIN(__rv_clz16, "ULiULi", "", "experimental-p") +// clz32 +TARGET_BUILTIN(__rv_clz32, "ULiULi", "", "experimental-p") +// cmpeq8 +TARGET_BUILTIN(__rv_cmpeq8, "ULiULiULi", "", "experimental-p") +// cmpeq16 +TARGET_BUILTIN(__rv_cmpeq16, "ULiULiULi", "", "experimental-p") +// cras16 +TARGET_BUILTIN(__rv_cras16, "ULiULiULi", "", "experimental-p") +// crsa16 +TARGET_BUILTIN(__rv_crsa16, "ULiULiULi", "", "experimental-p") +// insb +TARGET_BUILTIN(__rv_insb, "ULiULiULiULi", "", "experimental-p") +// kabs8 +TARGET_BUILTIN(__rv_kabs8, "ULiULi", "", "experimental-p") +// kabs16 +TARGET_BUILTIN(__rv_kabs16, "ULiULi", "", "experimental-p") +// kabsw +TARGET_BUILTIN(__rv_kabsw, "ULiSLi", "", "experimental-p") +// kadd8 +TARGET_BUILTIN(__rv_kadd8, "ULiULiULi", "", "experimental-p") +// kadd16 +TARGET_BUILTIN(__rv_kadd16, "ULiULiULi", "", "experimental-p") +// kaddh +TARGET_BUILTIN(__rv_kaddh, "LiLiLi", "", "experimental-p") +// kaddw +TARGET_BUILTIN(__rv_kaddw, "LiLiLi", "", "experimental-p") +// kcras16 +TARGET_BUILTIN(__rv_kcras16, "ULiULiULi", "", "experimental-p") +// kcrsa16 +TARGET_BUILTIN(__rv_kcrsa16, "ULiULiULi", "", "experimental-p") +// kdmbb +TARGET_BUILTIN(__rv_kdmbb, "LiULiULi", "", "experimental-p") +// kdmbt +TARGET_BUILTIN(__rv_kdmbt, "LiULiULi", "", "experimental-p") +// kdmtt +TARGET_BUILTIN(__rv_kdmtt, "LiULiULi", "", "experimental-p") +// kdmabb +TARGET_BUILTIN(__rv_kdmabb, "LiLiULiULi", "", "experimental-p") +// kdmabt +TARGET_BUILTIN(__rv_kdmabt, "LiLiULiULi", "", "experimental-p") +// kdmatt +TARGET_BUILTIN(__rv_kdmatt, "LiLiULiULi", "", "experimental-p") +// khm8 +TARGET_BUILTIN(__rv_khm8, "ULiULiULi", "", "experimental-p") +// khmx8 +TARGET_BUILTIN(__rv_khmx8, "ULiULiULi", "", "experimental-p") +// khm16 +TARGET_BUILTIN(__rv_khm16, "ULiULiULi", "", "experimental-p") +// khmx16 +TARGET_BUILTIN(__rv_khmx16, "ULiULiULi", "", "experimental-p") +// khmbb +TARGET_BUILTIN(__rv_khmbb, "LiULiULi", "", "experimental-p") +// khmbt +TARGET_BUILTIN(__rv_khmbt, "LiULiULi", "", "experimental-p") +// khmtt +TARGET_BUILTIN(__rv_khmtt, "LiULiULi", "", "experimental-p") +// kmabb +TARGET_BUILTIN(__rv_kmabb, "LiLiULiULi", "", "experimental-p") +// kmabt +TARGET_BUILTIN(__rv_kmabt, "LiLiULiULi", "", "experimental-p") +// kmatt +TARGET_BUILTIN(__rv_kmatt, "LiLiULiULi", "", "experimental-p") +// kmada +TARGET_BUILTIN(__rv_kmada, "LiLiULiULi", "", "experimental-p") +// kmaxda +TARGET_BUILTIN(__rv_kmaxda, "LiLiULiULi", "", "experimental-p") +// kmads +TARGET_BUILTIN(__rv_kmads, "LiLiULiULi", 
"", "experimental-p") +// kmadrs +TARGET_BUILTIN(__rv_kmadrs, "LiLiULiULi", "", "experimental-p") +// kmaxds +TARGET_BUILTIN(__rv_kmaxds, "LiLiULiULi", "", "experimental-p") +// kmda +TARGET_BUILTIN(__rv_kmda, "LiULiULi", "", "experimental-p") +// kmxda +TARGET_BUILTIN(__rv_kmxda, "LiULiULi", "", "experimental-p") +// kmmac +TARGET_BUILTIN(__rv_kmmac, "LiLiLiLi", "", "experimental-p") +// kmmac.u +TARGET_BUILTIN(__rv_kmmac_u, "LiLiLiLi", "", "experimental-p") +// kmmawb +TARGET_BUILTIN(__rv_kmmawb, "LiLiLiULi", "", "experimental-p") +// kmmawb_u +TARGET_BUILTIN(__rv_kmmawb_u, "LiLiLiULi", "", "experimental-p") +// kmmawb2 +TARGET_BUILTIN(__rv_kmmawb2, "LiLiLiULi", "", "experimental-p") +// kmmawb2_u +TARGET_BUILTIN(__rv_kmmawb2_u, "LiLiLiULi", "", "experimental-p") +// kmmawt +TARGET_BUILTIN(__rv_kmmawt, "LiLiLiULi", "", "experimental-p") +// kmmawt_u +TARGET_BUILTIN(__rv_kmmawt_u, "LiLiLiULi", "", "experimental-p") +// kmmawt2 +TARGET_BUILTIN(__rv_kmmawt2, "LiLiLiULi", "", "experimental-p") +// kmmawt2_u +TARGET_BUILTIN(__rv_kmmawt2_u, "LiLiLiULi", "", "experimental-p") +// kmmsb +TARGET_BUILTIN(__rv_kmmsb, "LiLiLiLi", "", "experimental-p") +// kmmsb.u +TARGET_BUILTIN(__rv_kmmsb_u, "LiLiLiLi", "", "experimental-p") +// kmmwb2 +TARGET_BUILTIN(__rv_kmmwb2, "LiLiULi", "", "experimental-p") +// kmmwb2_u +TARGET_BUILTIN(__rv_kmmwb2_u, "LiLiULi", "", "experimental-p") +// kmmwt2 +TARGET_BUILTIN(__rv_kmmwt2, "LiLiULi", "", "experimental-p") +// kmmwt2_u +TARGET_BUILTIN(__rv_kmmwt2_u, "LiLiULi", "", "experimental-p") +// kmsda +TARGET_BUILTIN(__rv_kmsda, "LiLiULiULi", "", "experimental-p") +// kmsxda +TARGET_BUILTIN(__rv_kmsxda, "LiLiULiULi", "", "experimental-p") +// ksllw +TARGET_BUILTIN(__rv_ksllw, "LiLiULi", "", "experimental-p") +// ksll8 +TARGET_BUILTIN(__rv_ksll8, "ULiULiULi", "", "experimental-p") +// ksll16 +TARGET_BUILTIN(__rv_ksll16, "ULiULiULi", "", "experimental-p") +// kslra8 +TARGET_BUILTIN(__rv_kslra8, "ULiULiULi", "", "experimental-p") +// kslra8_u +TARGET_BUILTIN(__rv_kslra8_u, "ULiULiULi", "", "experimental-p") +// kslra16 +TARGET_BUILTIN(__rv_kslra16, "ULiULiULi", "", "experimental-p") +// kslra16_u +TARGET_BUILTIN(__rv_kslra16_u, "ULiULiULi", "", "experimental-p") +// kslraw +TARGET_BUILTIN(__rv_kslraw, "LiLiLi", "", "experimental-p") +// kslraw_u +TARGET_BUILTIN(__rv_kslraw_u, "LiLiLi", "", "experimental-p") +// kstas16 +TARGET_BUILTIN(__rv_kstas16, "ULiULiULi", "", "experimental-p") +// kstsa16 +TARGET_BUILTIN(__rv_kstsa16, "ULiULiULi", "", "experimental-p") +// ksub8 +TARGET_BUILTIN(__rv_ksub8, "ULiULiULi", "", "experimental-p") +// ksub16 +TARGET_BUILTIN(__rv_ksub16, "ULiULiULi", "", "experimental-p") +// ksubh +TARGET_BUILTIN(__rv_ksubh, "LiLiLi", "", "experimental-p") +// ksubw +TARGET_BUILTIN(__rv_ksubw, "LiLiLi", "", "experimental-p") +// kwmmul +TARGET_BUILTIN(__rv_kwmmul, "LiLiLi", "", "experimental-p") +// kwmmul_u +TARGET_BUILTIN(__rv_kwmmul_u, "LiLiLi", "", "experimental-p") +// maxw +TARGET_BUILTIN(__rv_maxw, "LiLiLi", "", "experimental-p") +// minw +TARGET_BUILTIN(__rv_minw, "LiLiLi", "", "experimental-p") +// pbsad +TARGET_BUILTIN(__rv_pbsad, "ULiULiULi", "", "experimental-p") +// pbsada +TARGET_BUILTIN(__rv_pbsada, "ULiULiULiULi", "", "experimental-p") +// pkbb16 +TARGET_BUILTIN(__rv_pkbb16, "ULiULiULi", "", "experimental-p") +// pkbt16 +TARGET_BUILTIN(__rv_pkbt16, "ULiULiULi", "", "experimental-p") +// pktt16 +TARGET_BUILTIN(__rv_pktt16, "ULiULiULi", "", "experimental-p") +// pktb16 +TARGET_BUILTIN(__rv_pktb16, "ULiULiULi", "", "experimental-p") +// radd8 
+TARGET_BUILTIN(__rv_radd8, "ULiULiULi", "", "experimental-p") +// radd16 +TARGET_BUILTIN(__rv_radd16, "ULiULiULi", "", "experimental-p") +// raddw +TARGET_BUILTIN(__rv_raddw, "LiLiLi", "", "experimental-p") +// rcras16 +TARGET_BUILTIN(__rv_rcras16, "ULiULiULi", "", "experimental-p") +// rcrsa16 +TARGET_BUILTIN(__rv_rcrsa16, "ULiULiULi", "", "experimental-p") +// rstas16 +TARGET_BUILTIN(__rv_rstas16, "ULiULiULi", "", "experimental-p") +// rstsa16 +TARGET_BUILTIN(__rv_rstsa16, "ULiULiULi", "", "experimental-p") +// rsub8 +TARGET_BUILTIN(__rv_rsub8, "ULiULiULi", "", "experimental-p") +// rsub16 +TARGET_BUILTIN(__rv_rsub16, "ULiULiULi", "", "experimental-p") +// rsubw +TARGET_BUILTIN(__rv_rsubw, "LiLiLi", "", "experimental-p") +// sclip8 +TARGET_BUILTIN(__rv_sclip8, "ULiULiULi", "", "experimental-p") +// sclip16 +TARGET_BUILTIN(__rv_sclip16, "ULiULiULi", "", "experimental-p") +// sclip32 +TARGET_BUILTIN(__rv_sclip32, "LiLiLi", "", "experimental-p") +// scmple8 +TARGET_BUILTIN(__rv_scmple8, "ULiULiULi", "", "experimental-p") +// scmple16 +TARGET_BUILTIN(__rv_scmple16, "ULiULiULi", "", "experimental-p") +// scmplt8 +TARGET_BUILTIN(__rv_scmplt8, "ULiULiULi", "", "experimental-p") +// scmplt16 +TARGET_BUILTIN(__rv_scmplt16, "ULiULiULi", "", "experimental-p") +// sll8 +TARGET_BUILTIN(__rv_sll8, "ULiULiULi", "", "experimental-p") +// sll16 +TARGET_BUILTIN(__rv_sll16, "ULiULiULi", "", "experimental-p") +// smaqa +TARGET_BUILTIN(__rv_smaqa, "LiLiULiULi", "", "experimental-p") +// smaqa_su +TARGET_BUILTIN(__rv_smaqa_su, "LiLiULiULi", "", "experimental-p") +// smax8 +TARGET_BUILTIN(__rv_smax8, "ULiULiULi", "", "experimental-p") +// smax16 +TARGET_BUILTIN(__rv_smax16, "ULiULiULi", "", "experimental-p") +// smbb16 +TARGET_BUILTIN(__rv_smbb16, "LiULiULi", "", "experimental-p") +// smbt16 +TARGET_BUILTIN(__rv_smbt16, "LiULiULi", "", "experimental-p") +// smtt16 +TARGET_BUILTIN(__rv_smtt16, "LiULiULi", "", "experimental-p") +// smds +TARGET_BUILTIN(__rv_smds, "LiULiULi", "", "experimental-p") +// smdrs +TARGET_BUILTIN(__rv_smdrs, "LiULiULi", "", "experimental-p") +// smxds +TARGET_BUILTIN(__rv_smxds, "LiULiULi", "", "experimental-p") +// smin8 +TARGET_BUILTIN(__rv_smin8, "ULiULiULi", "", "experimental-p") +// smin16 +TARGET_BUILTIN(__rv_smin16, "ULiULiULi", "", "experimental-p") +// smmul +TARGET_BUILTIN(__rv_smmul, "LiLiLi", "", "experimental-p") +// smmul_u +TARGET_BUILTIN(__rv_smmul_u, "LiLiLi", "", "experimental-p") +// smmwb +TARGET_BUILTIN(__rv_smmwb, "LiLiULi", "", "experimental-p") +// smmwb_u +TARGET_BUILTIN(__rv_smmwb_u, "LiLiULi", "", "experimental-p") +// smmwt +TARGET_BUILTIN(__rv_smmwt, "LiLiULi", "", "experimental-p") +// smmwt_u +TARGET_BUILTIN(__rv_smmwt_u, "LiLiULi", "", "experimental-p") +// sra_u +TARGET_BUILTIN(__rv_sra_u, "LiLiULi", "", "experimental-p") +// sra8 +TARGET_BUILTIN(__rv_sra8, "ULiULiULi", "", "experimental-p") +// sra8_u +TARGET_BUILTIN(__rv_sra8_u, "ULiULiULi", "", "experimental-p") +// sra16 +TARGET_BUILTIN(__rv_sra16, "ULiULiULi", "", "experimental-p") +// sra16_u +TARGET_BUILTIN(__rv_sra16_u, "ULiULiULi", "", "experimental-p") +// srl8 +TARGET_BUILTIN(__rv_srl8, "ULiULiULi", "", "experimental-p") +// srl8_u +TARGET_BUILTIN(__rv_srl8_u, "ULiULiULi", "", "experimental-p") +// srl16 +TARGET_BUILTIN(__rv_srl16, "ULiULiULi", "", "experimental-p") +// srl16_u +TARGET_BUILTIN(__rv_srl16_u, "ULiULiULi", "", "experimental-p") +// stas16 +TARGET_BUILTIN(__rv_stas16, "ULiULiULi", "", "experimental-p") +// stsa16 +TARGET_BUILTIN(__rv_stsa16, "ULiULiULi", "", "experimental-p") +// 
sub8 +TARGET_BUILTIN(__rv_sub8, "ULiULiULi", "", "experimental-p") +// sub16 +TARGET_BUILTIN(__rv_sub16, "ULiULiULi", "", "experimental-p") +// sunpkd810 +TARGET_BUILTIN(__rv_sunpkd810, "ULiULi", "", "experimental-p") +// sunpkd820 +TARGET_BUILTIN(__rv_sunpkd820, "ULiULi", "", "experimental-p") +// sunpkd830 +TARGET_BUILTIN(__rv_sunpkd830, "ULiULi", "", "experimental-p") +// sunpkd831 +TARGET_BUILTIN(__rv_sunpkd831, "ULiULi", "", "experimental-p") +// sunpkd832 +TARGET_BUILTIN(__rv_sunpkd832, "ULiULi", "", "experimental-p") +// swap8 +TARGET_BUILTIN(__rv_swap8, "ULiULi", "", "experimental-p") +// swap16 +TARGET_BUILTIN(__rv_swap16, "ULiULi", "", "experimental-p") +// uclip8 +TARGET_BUILTIN(__rv_uclip8, "ULiULiULi", "", "experimental-p") +// uclip16 +TARGET_BUILTIN(__rv_uclip16, "ULiULiULi", "", "experimental-p") +// uclip32 +TARGET_BUILTIN(__rv_uclip32, "LiLiLi", "", "experimental-p") +// ucmple8 +TARGET_BUILTIN(__rv_ucmple8, "ULiULiULi", "", "experimental-p") +// ucmple16 +TARGET_BUILTIN(__rv_ucmple16, "ULiULiULi", "", "experimental-p") +// ucmplt8 +TARGET_BUILTIN(__rv_ucmplt8, "ULiULiULi", "", "experimental-p") +// ucmplt16 +TARGET_BUILTIN(__rv_ucmplt16, "ULiULiULi", "", "experimental-p") +// ukadd8 +TARGET_BUILTIN(__rv_ukadd8, "ULiULiULi", "", "experimental-p") +// ukadd16 +TARGET_BUILTIN(__rv_ukadd16, "ULiULiULi", "", "experimental-p") +// ukaddh +TARGET_BUILTIN(__rv_ukaddh, "ULiULiULi", "", "experimental-p") +// ukaddw +TARGET_BUILTIN(__rv_ukaddw, "ULiULiULi", "", "experimental-p") +// ukcras16 +TARGET_BUILTIN(__rv_ukcras16, "ULiULiULi", "", "experimental-p") +// ukcrsa16 +TARGET_BUILTIN(__rv_ukcrsa16, "ULiULiULi", "", "experimental-p") +// ukstas16 +TARGET_BUILTIN(__rv_ukstas16, "ULiULiULi", "", "experimental-p") +// ukstsa16 +TARGET_BUILTIN(__rv_ukstsa16, "ULiULiULi", "", "experimental-p") +// uksub8 +TARGET_BUILTIN(__rv_uksub8, "ULiULiULi", "", "experimental-p") +// uksub16 +TARGET_BUILTIN(__rv_uksub16, "ULiULiULi", "", "experimental-p") +// uksubh +TARGET_BUILTIN(__rv_uksubh, "LiLiLi", "", "experimental-p") +// uksubw +TARGET_BUILTIN(__rv_uksubw, "LiLiLi", "", "experimental-p") +// umaqa +TARGET_BUILTIN(__rv_umaqa, "ULiULiULiULi", "", "experimental-p") +// umax8 +TARGET_BUILTIN(__rv_umax8, "ULiULiULi", "", "experimental-p") +// umax16 +TARGET_BUILTIN(__rv_umax16, "ULiULiULi", "", "experimental-p") +// umin8 +TARGET_BUILTIN(__rv_umin8, "ULiULiULi", "", "experimental-p") +// umin16 +TARGET_BUILTIN(__rv_umin16, "ULiULiULi", "", "experimental-p") +// uradd8 +TARGET_BUILTIN(__rv_uradd8, "ULiULiULi", "", "experimental-p") +// uradd16 +TARGET_BUILTIN(__rv_uradd16, "ULiULiULi", "", "experimental-p") +// uraddw +TARGET_BUILTIN(__rv_uraddw, "LiLiLi", "", "experimental-p") +// urcras16 +TARGET_BUILTIN(__rv_urcras16, "ULiULiULi", "", "experimental-p") +// urcrsa16 +TARGET_BUILTIN(__rv_urcrsa16, "ULiULiULi", "", "experimental-p") +// urstas16 +TARGET_BUILTIN(__rv_urstas16, "ULiULiULi", "", "experimental-p") +// urstsa16 +TARGET_BUILTIN(__rv_urstsa16, "ULiULiULi", "", "experimental-p") +// ursub8 +TARGET_BUILTIN(__rv_ursub8, "ULiULiULi", "", "experimental-p") +// ursub16 +TARGET_BUILTIN(__rv_ursub16, "ULiULiULi", "", "experimental-p") +// ursubw +TARGET_BUILTIN(__rv_ursubw, "LiLiLi", "", "experimental-p") +// zunpkd810 +TARGET_BUILTIN(__rv_zunpkd810, "ULiULi", "", "experimental-p") +// zunpkd820 +TARGET_BUILTIN(__rv_zunpkd820, "ULiULi", "", "experimental-p") +// zunpkd830 +TARGET_BUILTIN(__rv_zunpkd830, "ULiULi", "", "experimental-p") +// zunpkd831 +TARGET_BUILTIN(__rv_zunpkd831, "ULiULi", "", 
"experimental-p") +// zunpkd832 +TARGET_BUILTIN(__rv_zunpkd832, "ULiULi", "", "experimental-p") + #undef BUILTIN #undef TARGET_BUILTIN diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -19397,6 +19397,437 @@ IntrinsicTypes = {ResultType}; break; + // P extension +#define BUILTIN_ID(NAME) \ + case RISCV::BI__rv_##NAME: \ + ID = Intrinsic::riscv_##NAME; \ + break; + + // Intrinsic type is obtained from Ops[0]. + case RISCV::BI__rv_add8: + case RISCV::BI__rv_add16: + case RISCV::BI__rv_ave: + case RISCV::BI__rv_bitrev: + case RISCV::BI__rv_bpick: + case RISCV::BI__rv_clrs8: + case RISCV::BI__rv_clrs16: + case RISCV::BI__rv_clrs32: + case RISCV::BI__rv_clz8: + case RISCV::BI__rv_clz16: + case RISCV::BI__rv_clz32: + case RISCV::BI__rv_cmpeq8: + case RISCV::BI__rv_cmpeq16: + case RISCV::BI__rv_cras16: + case RISCV::BI__rv_crsa16: + case RISCV::BI__rv_insb: + case RISCV::BI__rv_kabs8: + case RISCV::BI__rv_kabs16: + case RISCV::BI__rv_kabsw: + case RISCV::BI__rv_kadd8: + case RISCV::BI__rv_kadd16: + case RISCV::BI__rv_kaddh: + case RISCV::BI__rv_kaddw: + case RISCV::BI__rv_kcras16: + case RISCV::BI__rv_kcrsa16: + case RISCV::BI__rv_khm8: + case RISCV::BI__rv_khm16: + case RISCV::BI__rv_khmx8: + case RISCV::BI__rv_khmx16: + case RISCV::BI__rv_kmmac: + case RISCV::BI__rv_kmmac_u: + case RISCV::BI__rv_kmmsb: + case RISCV::BI__rv_kmmsb_u: + case RISCV::BI__rv_ksllw: + case RISCV::BI__rv_kslraw: + case RISCV::BI__rv_kslraw_u: + case RISCV::BI__rv_kstas16: + case RISCV::BI__rv_kstsa16: + case RISCV::BI__rv_ksub8: + case RISCV::BI__rv_ksub16: + case RISCV::BI__rv_ksubh: + case RISCV::BI__rv_ksubw: + case RISCV::BI__rv_kwmmul: + case RISCV::BI__rv_kwmmul_u: + case RISCV::BI__rv_maxw: + case RISCV::BI__rv_minw: + case RISCV::BI__rv_pkbb16: + case RISCV::BI__rv_pkbt16: + case RISCV::BI__rv_pktt16: + case RISCV::BI__rv_pktb16: + case RISCV::BI__rv_radd8: + case RISCV::BI__rv_radd16: + case RISCV::BI__rv_raddw: + case RISCV::BI__rv_rcras16: + case RISCV::BI__rv_rcrsa16: + case RISCV::BI__rv_rstas16: + case RISCV::BI__rv_rstsa16: + case RISCV::BI__rv_rsub8: + case RISCV::BI__rv_rsub16: + case RISCV::BI__rv_rsubw: + case RISCV::BI__rv_scmple8: + case RISCV::BI__rv_scmple16: + case RISCV::BI__rv_scmplt8: + case RISCV::BI__rv_scmplt16: + case RISCV::BI__rv_smax8: + case RISCV::BI__rv_smax16: + case RISCV::BI__rv_smin8: + case RISCV::BI__rv_smin16: + case RISCV::BI__rv_smmul: + case RISCV::BI__rv_smmul_u: + case RISCV::BI__rv_stas16: + case RISCV::BI__rv_stsa16: + case RISCV::BI__rv_sub8: + case RISCV::BI__rv_sub16: + case RISCV::BI__rv_swap8: + case RISCV::BI__rv_swap16: + case RISCV::BI__rv_ucmple8: + case RISCV::BI__rv_ucmple16: + case RISCV::BI__rv_ucmplt8: + case RISCV::BI__rv_ucmplt16: + case RISCV::BI__rv_ukadd8: + case RISCV::BI__rv_ukadd16: + case RISCV::BI__rv_ukaddh: + case RISCV::BI__rv_ukaddw: + case RISCV::BI__rv_ukcras16: + case RISCV::BI__rv_ukcrsa16: + case RISCV::BI__rv_ukstas16: + case RISCV::BI__rv_ukstsa16: + case RISCV::BI__rv_uksub8: + case RISCV::BI__rv_uksub16: + case RISCV::BI__rv_uksubh: + case RISCV::BI__rv_uksubw: + case RISCV::BI__rv_umax8: + case RISCV::BI__rv_umax16: + case RISCV::BI__rv_umin8: + case RISCV::BI__rv_umin16: + case RISCV::BI__rv_uradd8: + case RISCV::BI__rv_uradd16: + case RISCV::BI__rv_uraddw: + case RISCV::BI__rv_urcras16: + case RISCV::BI__rv_urcrsa16: + case RISCV::BI__rv_urstas16: + case RISCV::BI__rv_urstsa16: + case RISCV::BI__rv_ursub8: + case 
RISCV::BI__rv_ursub16: + case RISCV::BI__rv_ursubw: { + switch (BuiltinID) { + default: + llvm_unreachable("unexpected builtin ID"); + BUILTIN_ID(add8) + BUILTIN_ID(add16) + BUILTIN_ID(ave) + BUILTIN_ID(bitrev) + BUILTIN_ID(bpick) + BUILTIN_ID(clrs8) + BUILTIN_ID(clrs16) + BUILTIN_ID(clrs32) + BUILTIN_ID(clz8) + BUILTIN_ID(clz16) + BUILTIN_ID(clz32) + BUILTIN_ID(cmpeq8) + BUILTIN_ID(cmpeq16) + BUILTIN_ID(cras16) + BUILTIN_ID(crsa16) + BUILTIN_ID(insb) + BUILTIN_ID(kabs8) + BUILTIN_ID(kabs16) + BUILTIN_ID(kabsw) + BUILTIN_ID(kadd8) + BUILTIN_ID(kadd16) + BUILTIN_ID(kaddh) + BUILTIN_ID(kaddw) + BUILTIN_ID(kcras16) + BUILTIN_ID(kcrsa16) + BUILTIN_ID(khm8) + BUILTIN_ID(khm16) + BUILTIN_ID(khmx8) + BUILTIN_ID(khmx16) + BUILTIN_ID(kmmac) + BUILTIN_ID(kmmac_u) + BUILTIN_ID(kmmsb) + BUILTIN_ID(kmmsb_u) + BUILTIN_ID(ksllw) + BUILTIN_ID(kslraw) + BUILTIN_ID(kslraw_u) + BUILTIN_ID(kstas16) + BUILTIN_ID(kstsa16) + BUILTIN_ID(ksub8) + BUILTIN_ID(ksub16) + BUILTIN_ID(ksubh) + BUILTIN_ID(ksubw) + BUILTIN_ID(kwmmul) + BUILTIN_ID(kwmmul_u) + BUILTIN_ID(maxw) + BUILTIN_ID(minw) + BUILTIN_ID(pkbb16) + BUILTIN_ID(pkbt16) + BUILTIN_ID(pktt16) + BUILTIN_ID(pktb16) + BUILTIN_ID(radd8) + BUILTIN_ID(radd16) + BUILTIN_ID(raddw) + BUILTIN_ID(rcras16) + BUILTIN_ID(rcrsa16) + BUILTIN_ID(rstas16) + BUILTIN_ID(rstsa16) + BUILTIN_ID(rsub8) + BUILTIN_ID(rsub16) + BUILTIN_ID(rsubw) + BUILTIN_ID(scmple8) + BUILTIN_ID(scmple16) + BUILTIN_ID(scmplt8) + BUILTIN_ID(scmplt16) + BUILTIN_ID(smax8) + BUILTIN_ID(smax16) + BUILTIN_ID(smin8) + BUILTIN_ID(smin16) + BUILTIN_ID(smmul) + BUILTIN_ID(smmul_u) + BUILTIN_ID(stas16) + BUILTIN_ID(stsa16) + BUILTIN_ID(sub8) + BUILTIN_ID(sub16) + BUILTIN_ID(swap8) + BUILTIN_ID(swap16) + BUILTIN_ID(ucmple8) + BUILTIN_ID(ucmple16) + BUILTIN_ID(ucmplt8) + BUILTIN_ID(ucmplt16) + BUILTIN_ID(ukadd8) + BUILTIN_ID(ukadd16) + BUILTIN_ID(ukaddh) + BUILTIN_ID(ukaddw) + BUILTIN_ID(ukcras16) + BUILTIN_ID(ukcrsa16) + BUILTIN_ID(ukstas16) + BUILTIN_ID(ukstsa16) + BUILTIN_ID(uksub8) + BUILTIN_ID(uksub16) + BUILTIN_ID(uksubh) + BUILTIN_ID(uksubw) + BUILTIN_ID(umax8) + BUILTIN_ID(umax16) + BUILTIN_ID(umin8) + BUILTIN_ID(umin16) + BUILTIN_ID(uradd8) + BUILTIN_ID(uradd16) + BUILTIN_ID(uraddw) + BUILTIN_ID(urcras16) + BUILTIN_ID(urcrsa16) + BUILTIN_ID(urstas16) + BUILTIN_ID(urstsa16) + BUILTIN_ID(ursub8) + BUILTIN_ID(ursub16) + BUILTIN_ID(ursubw) + } + + IntrinsicTypes = {Ops[0]->getType()}; + break; + } + + // Intrinsic type is obtained from ResultType. + case RISCV::BI__rv_sunpkd810: + case RISCV::BI__rv_sunpkd820: + case RISCV::BI__rv_sunpkd830: + case RISCV::BI__rv_sunpkd831: + case RISCV::BI__rv_sunpkd832: + case RISCV::BI__rv_zunpkd810: + case RISCV::BI__rv_zunpkd820: + case RISCV::BI__rv_zunpkd830: + case RISCV::BI__rv_zunpkd831: + case RISCV::BI__rv_zunpkd832: { + switch (BuiltinID) { + default: + llvm_unreachable("unexpected builtin ID"); + BUILTIN_ID(sunpkd810) + BUILTIN_ID(sunpkd820) + BUILTIN_ID(sunpkd830) + BUILTIN_ID(sunpkd831) + BUILTIN_ID(sunpkd832) + BUILTIN_ID(zunpkd810) + BUILTIN_ID(zunpkd820) + BUILTIN_ID(zunpkd830) + BUILTIN_ID(zunpkd831) + BUILTIN_ID(zunpkd832) + } + + IntrinsicTypes = {ResultType}; + break; + } + + // Intrinsic type is obtained from ResultType and Ops[0]. 
+ case RISCV::BI__rv_kdmbb: + case RISCV::BI__rv_kdmbt: + case RISCV::BI__rv_kdmtt: + case RISCV::BI__rv_khmbb: + case RISCV::BI__rv_khmbt: + case RISCV::BI__rv_khmtt: + case RISCV::BI__rv_kmda: + case RISCV::BI__rv_kmxda: + case RISCV::BI__rv_pbsad: + case RISCV::BI__rv_smbb16: + case RISCV::BI__rv_smbt16: + case RISCV::BI__rv_smtt16: + case RISCV::BI__rv_smds: + case RISCV::BI__rv_smdrs: + case RISCV::BI__rv_smxds: { + switch (BuiltinID) { + default: + llvm_unreachable("unexpected builtin ID"); + BUILTIN_ID(kdmbb) + BUILTIN_ID(kdmbt) + BUILTIN_ID(kdmtt) + BUILTIN_ID(khmbb) + BUILTIN_ID(khmbt) + BUILTIN_ID(khmtt) + BUILTIN_ID(kmda) + BUILTIN_ID(kmxda) + BUILTIN_ID(pbsad) + BUILTIN_ID(smbb16) + BUILTIN_ID(smbt16) + BUILTIN_ID(smtt16) + BUILTIN_ID(smds) + BUILTIN_ID(smdrs) + BUILTIN_ID(smxds) + } + + IntrinsicTypes = {ResultType, Ops[0]->getType()}; + break; + } + + // Intrinsic type is obtained from ResultType and Ops[1]. + case RISCV::BI__rv_kdmabb: + case RISCV::BI__rv_kdmabt: + case RISCV::BI__rv_kdmatt: + case RISCV::BI__rv_kmabb: + case RISCV::BI__rv_kmabt: + case RISCV::BI__rv_kmatt: + case RISCV::BI__rv_kmada: + case RISCV::BI__rv_kmaxda: + case RISCV::BI__rv_kmads: + case RISCV::BI__rv_kmadrs: + case RISCV::BI__rv_kmaxds: + case RISCV::BI__rv_kmmwb2: + case RISCV::BI__rv_kmmwb2_u: + case RISCV::BI__rv_kmmwt2: + case RISCV::BI__rv_kmmwt2_u: + case RISCV::BI__rv_kmsda: + case RISCV::BI__rv_kmsxda: + case RISCV::BI__rv_ksll8: + case RISCV::BI__rv_ksll16: + case RISCV::BI__rv_kslra8: + case RISCV::BI__rv_kslra8_u: + case RISCV::BI__rv_kslra16: + case RISCV::BI__rv_kslra16_u: + case RISCV::BI__rv_pbsada: + case RISCV::BI__rv_sclip8: + case RISCV::BI__rv_sclip16: + case RISCV::BI__rv_sclip32: + case RISCV::BI__rv_sll8: + case RISCV::BI__rv_sll16: + case RISCV::BI__rv_smaqa: + case RISCV::BI__rv_smaqa_su: + case RISCV::BI__rv_smmwb: + case RISCV::BI__rv_smmwb_u: + case RISCV::BI__rv_smmwt: + case RISCV::BI__rv_smmwt_u: + case RISCV::BI__rv_sra_u: + case RISCV::BI__rv_sra8: + case RISCV::BI__rv_sra8_u: + case RISCV::BI__rv_sra16: + case RISCV::BI__rv_sra16_u: + case RISCV::BI__rv_srl8: + case RISCV::BI__rv_srl8_u: + case RISCV::BI__rv_srl16: + case RISCV::BI__rv_srl16_u: + case RISCV::BI__rv_uclip8: + case RISCV::BI__rv_uclip16: + case RISCV::BI__rv_uclip32: + case RISCV::BI__rv_umaqa: { + switch (BuiltinID) { + default: + llvm_unreachable("unexpected builtin ID"); + BUILTIN_ID(kdmabb) + BUILTIN_ID(kdmabt) + BUILTIN_ID(kdmatt) + BUILTIN_ID(kmabb) + BUILTIN_ID(kmabt) + BUILTIN_ID(kmatt) + BUILTIN_ID(kmada) + BUILTIN_ID(kmaxda) + BUILTIN_ID(kmads) + BUILTIN_ID(kmadrs) + BUILTIN_ID(kmaxds) + BUILTIN_ID(kmmwb2) + BUILTIN_ID(kmmwb2_u) + BUILTIN_ID(kmmwt2) + BUILTIN_ID(kmmwt2_u) + BUILTIN_ID(kmsda) + BUILTIN_ID(kmsxda) + BUILTIN_ID(ksll8) + BUILTIN_ID(ksll16) + BUILTIN_ID(kslra8) + BUILTIN_ID(kslra8_u) + BUILTIN_ID(kslra16) + BUILTIN_ID(kslra16_u) + BUILTIN_ID(pbsada) + BUILTIN_ID(sclip8) + BUILTIN_ID(sclip16) + BUILTIN_ID(sclip32) + BUILTIN_ID(sll8) + BUILTIN_ID(sll16) + BUILTIN_ID(smaqa) + BUILTIN_ID(smaqa_su) + BUILTIN_ID(smmwb) + BUILTIN_ID(smmwb_u) + BUILTIN_ID(smmwt) + BUILTIN_ID(smmwt_u) + BUILTIN_ID(sra_u) + BUILTIN_ID(sra8) + BUILTIN_ID(sra8_u) + BUILTIN_ID(sra16) + BUILTIN_ID(sra16_u) + BUILTIN_ID(srl8) + BUILTIN_ID(srl8_u) + BUILTIN_ID(srl16) + BUILTIN_ID(srl16_u) + BUILTIN_ID(uclip8) + BUILTIN_ID(uclip16) + BUILTIN_ID(uclip32) + BUILTIN_ID(umaqa) + } + + IntrinsicTypes = {ConvertType(E->getType()), Ops[1]->getType()}; + break; + } + + // Intrinsic type is obtained from ResultType and 
Ops[2].
+  case RISCV::BI__rv_kmmawb:
+  case RISCV::BI__rv_kmmawb_u:
+  case RISCV::BI__rv_kmmawb2:
+  case RISCV::BI__rv_kmmawb2_u:
+  case RISCV::BI__rv_kmmawt:
+  case RISCV::BI__rv_kmmawt_u:
+  case RISCV::BI__rv_kmmawt2:
+  case RISCV::BI__rv_kmmawt2_u: {
+    switch (BuiltinID) {
+    default:
+      llvm_unreachable("unexpected builtin ID");
+      BUILTIN_ID(kmmawb)
+      BUILTIN_ID(kmmawb_u)
+      BUILTIN_ID(kmmawb2)
+      BUILTIN_ID(kmmawb2_u)
+      BUILTIN_ID(kmmawt)
+      BUILTIN_ID(kmmawt_u)
+      BUILTIN_ID(kmmawt2)
+      BUILTIN_ID(kmmawt2_u)
+    }
+
+    IntrinsicTypes = {ResultType, Ops[2]->getType()};
+    break;
+  }
+#undef BUILTIN_ID
+
   // Vector builtins are handled from here.
 #include "clang/Basic/riscv_vector_builtin_cg.inc"
 }
diff --git a/clang/test/CodeGen/RISCV/rvp-intrinsics/rv32p.c b/clang/test/CodeGen/RISCV/rvp-intrinsics/rv32p.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvp-intrinsics/rv32p.c
@@ -0,0 +1,1692 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -O2 -target-feature +experimental-p \
+// RUN:   -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV32 %s
+
+typedef signed char int8x4_t __attribute__((vector_size(4)));
+typedef short int16x2_t __attribute__((vector_size(4)));
+typedef unsigned char uint8x4_t __attribute__((vector_size(4)));
+typedef unsigned short uint16x2_t __attribute__((vector_size(4)));
+
+// CHECK-RV32-LABEL: @add8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.add8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long add8(unsigned long a, unsigned long b) {
+  return __rv_add8(a, b);
+}
+
+// CHECK-RV32-LABEL: @add16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.add16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long add16(unsigned long a, unsigned long b) {
+  return __rv_add16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ave(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ave.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long ave(long a, long b) {
+  return __rv_ave(a, b);
+}
+
+// CHECK-RV32-LABEL: @bitrev(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.bitrev.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long bitrev(unsigned long a, unsigned long b) {
+  return __rv_bitrev(a, b);
+}
+
+// CHECK-RV32-LABEL: @bpick(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.bpick.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long bpick(unsigned long a, unsigned long b, unsigned long c) {
+  return __rv_bpick(a, b, c);
+}
+
+// CHECK-RV32-LABEL: @clrs8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.clrs8.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long clrs8(unsigned long a) {
+  return __rv_clrs8(a);
+}
+
+// CHECK-RV32-LABEL: @clrs16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.clrs16.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long clrs16(unsigned long a) {
+  return __rv_clrs16(a);
+}
+
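A typical use of the clrs/clz builtins above is block normalization in fixed-point DSP code. A minimal sketch, assuming __rv_clrs32 counts the redundant sign bits of its 32-bit operand as in the P-extension proposal; the function name and Q31 framing are illustrative, not part of this patch:

long normalize_q31(long x, unsigned long *sh) {
  /* the redundant-sign-bit count says how far we can shift left safely */
  unsigned long n = __rv_clrs32((unsigned long)x);
  *sh = n;
  return (long)((unsigned long)x << n); /* shift via unsigned to avoid UB */
}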
+// CHECK-RV32-LABEL: @clrs32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.clrs32.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long clrs32(unsigned long a) {
+  return __rv_clrs32(a);
+}
+
+// CHECK-RV32-LABEL: @clz8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.clz8.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long clz8(unsigned long a) {
+  return __rv_clz8(a);
+}
+
+// CHECK-RV32-LABEL: @clz16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.clz16.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long clz16(unsigned long a) {
+  return __rv_clz16(a);
+}
+
+// CHECK-RV32-LABEL: @clz32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.clz32.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long clz32(unsigned long a) {
+  return __rv_clz32(a);
+}
+
+// CHECK-RV32-LABEL: @cmpeq8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.cmpeq8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long cmpeq8(unsigned long a, unsigned long b) {
+  return __rv_cmpeq8(a, b);
+}
+
+// CHECK-RV32-LABEL: @cmpeq16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.cmpeq16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long cmpeq16(unsigned long a, unsigned long b) {
+  return __rv_cmpeq16(a, b);
+}
+
+// CHECK-RV32-LABEL: @cras16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.cras16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long cras16(unsigned long a, unsigned long b) {
+  return __rv_cras16(a, b);
+}
+
+// CHECK-RV32-LABEL: @crsa16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.crsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long crsa16(unsigned long a, unsigned long b) {
+  return __rv_crsa16(a, b);
+}
+
+// CHECK-RV32-LABEL: @insb(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.insb.i32(i32 [[A:%.*]], i32 [[B:%.*]], i32 3)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long insb(unsigned long a, unsigned long b) {
+  return __rv_insb(a, b, 3);
+}
+
+// CHECK-RV32-LABEL: @kabs8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kabs8.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kabs8(unsigned long a) {
+  return __rv_kabs8(a);
+}
+
+// CHECK-RV32-LABEL: @kabs16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kabs16.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kabs16(unsigned long a) {
+  return __rv_kabs16(a);
+}
+
+// CHECK-RV32-LABEL: @kabsw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kabsw.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long kabsw(long a) {
+  return __rv_kabsw(a);
+}
+
+// CHECK-RV32-LABEL: @kadd8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kadd8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kadd8(unsigned long a, unsigned long b) {
+  return __rv_kadd8(a, b);
+}
+
+// CHECK-RV32-LABEL: @kadd16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kadd16.i32(i32 [[A:%.*]], i32
[[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long kadd16(unsigned long a, unsigned long b) { + return __rv_kadd16(a, b); +} + +// CHECK-RV32-LABEL: @kaddh( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kaddh.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kaddh(int a, int b) { + return __rv_kaddh(a, b); +} + +// CHECK-RV32-LABEL: @kaddw( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kaddw.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kaddw(int a, int b) { + return __rv_kaddw(a, b); +} + +// CHECK-RV32-LABEL: @kcras16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kcras16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long kcras16(unsigned long a, unsigned long b) { + return __rv_kcras16(a, b); +} + +// CHECK-RV32-LABEL: @kcrsa16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kcrsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long kcrsa16(unsigned long a, unsigned long b) { + return __rv_kcrsa16(a, b); +} + +// CHECK-RV32-LABEL: @kdmbb( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kdmbb.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kdmbb(unsigned int a, unsigned int b) { + return __rv_kdmbb(a, b); +} + +// CHECK-RV32-LABEL: @kdmbt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kdmbt.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kdmbt(unsigned int a, unsigned int b) { + return __rv_kdmbt(a, b); +} + +// CHECK-RV32-LABEL: @kdmtt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kdmtt.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kdmtt(unsigned int a, unsigned int b) { + return __rv_kdmtt(a, b); +} + +// CHECK-RV32-LABEL: @kdmabb( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kdmabb.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kdmabb(long t, unsigned int a, unsigned int b) { + return __rv_kdmabb(t, a, b); +} + +// CHECK-RV32-LABEL: @kdmabt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kdmabt.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kdmabt(long t, unsigned int a, unsigned int b) { + return __rv_kdmabt(t, a, b); +} + +// CHECK-RV32-LABEL: @kdmatt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kdmatt.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kdmatt(long t, unsigned int a, unsigned int b) { + return __rv_kdmatt(t, a, b); +} + +// CHECK-RV32-LABEL: @khm8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.khm8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long khm8(unsigned long a, unsigned long b) { + return __rv_khm8(a, b); +} + +// CHECK-RV32-LABEL: @khmx8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.khmx8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// 
+unsigned long khmx8(unsigned long a, unsigned long b) { + return __rv_khmx8(a, b); +} + +// CHECK-RV32-LABEL: @khm16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.khm16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long khm16(unsigned long a, unsigned long b) { + return __rv_khm16(a, b); +} + +// CHECK-RV32-LABEL: @khmx16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.khmx16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long khmx16(unsigned long a, unsigned long b) { + return __rv_khmx16(a, b); +} + +// CHECK-RV32-LABEL: @khmbb( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.khmbb.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long khmbb(unsigned int a, unsigned int b) { + return __rv_khmbb(a, b); +} + +// CHECK-RV32-LABEL: @khmbt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.khmbt.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long khmbt(unsigned int a, unsigned int b) { + return __rv_khmbt(a, b); +} + +// CHECK-RV32-LABEL: @khmtt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.khmtt.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long khmtt(unsigned int a, unsigned int b) { + return __rv_khmtt(a, b); +} + +// CHECK-RV32-LABEL: @kmabb( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmabb.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmabb(long t, unsigned long a, unsigned long b) { + return __rv_kmabb(t, a, b); +} + +// CHECK-RV32-LABEL: @kmabt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmabt.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmabt(long t, unsigned long a, unsigned long b) { + return __rv_kmabt(t, a, b); +} + +// CHECK-RV32-LABEL: @kmatt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmatt.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmatt(long t, unsigned long a, unsigned long b) { + return __rv_kmatt(t, a, b); +} + +// CHECK-RV32-LABEL: @kmada( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmada.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmada(long t, unsigned long a, unsigned long b) { + return __rv_kmada(t, a, b); +} + +// CHECK-RV32-LABEL: @kmaxda( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmaxda.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmaxda(long t, unsigned long a, unsigned long b) { + return __rv_kmaxda(t, a, b); +} + +// CHECK-RV32-LABEL: @kmads( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmads.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmads(long t, unsigned long a, unsigned long b) { + return __rv_kmads(t, a, b); +} + +// CHECK-RV32-LABEL: @kmadrs( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmadrs.i32.i32(i32 [[T:%.*]], 
i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmadrs(long t, unsigned long a, unsigned long b) { + return __rv_kmadrs(t, a, b); +} + +// CHECK-RV32-LABEL: @kmaxds( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmaxds.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmaxds(long t, unsigned long a, unsigned long b) { + return __rv_kmaxds(t, a, b); +} + +// CHECK-RV32-LABEL: @kmda( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmda.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmda(unsigned long a, unsigned long b) { + return __rv_kmda(a, b); +} + +// CHECK-RV32-LABEL: @kmxda( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmxda.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmxda(unsigned long a, unsigned long b) { + return __rv_kmxda(a, b); +} + +// CHECK-RV32-LABEL: @kmmac( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmac.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmac(long t, long a, long b) { + return __rv_kmmac(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmac_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmac.u.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmac_u(long t, long a, long b) { + return __rv_kmmac_u(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmawb( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmawb.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmawb(long t, unsigned long a, unsigned long b) { + return __rv_kmmawb(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmawb_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmawb.u.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmawb_u(long t, unsigned long a, unsigned long b) { + return __rv_kmmawb_u(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmawb2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmawb2.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmawb2(long t, unsigned long a, unsigned long b) { + return __rv_kmmawb2(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmawb2_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmawb2.u.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmawb2_u(long t, unsigned long a, unsigned long b) { + return __rv_kmmawb2_u(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmawt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmawt.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmawt(long t, unsigned long a, unsigned long b) { + return __rv_kmmawt(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmawt_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmawt.u.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmawt_u(long t, unsigned long a, unsigned long b) { + return __rv_kmmawt_u(t, 
a, b); +} + +// CHECK-RV32-LABEL: @kmmawt2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmawt2.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmawt2(long t, unsigned long a, unsigned long b) { + return __rv_kmmawt2(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmawt2_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmawt2.u.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmawt2_u(long t, unsigned long a, unsigned long b) { + return __rv_kmmawt2_u(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmsb( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmsb.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmsb(long t, long a, long b) { + return __rv_kmmsb(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmsb_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmsb.u.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmsb_u(long t, long a, long b) { + return __rv_kmmsb_u(t, a, b); +} + +// CHECK-RV32-LABEL: @kmmwb2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmwb2.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmwb2(long a, unsigned long b) { + return __rv_kmmwb2(a, b); +} + +// CHECK-RV32-LABEL: @kmmwb2_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmwb2.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmwb2_u(long a, unsigned long b) { + return __rv_kmmwb2_u(a, b); +} + +// CHECK-RV32-LABEL: @kmmwt2( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmwt2.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmwt2(long a, unsigned long b) { + return __rv_kmmwt2(a, b); +} + +// CHECK-RV32-LABEL: @kmmwt2_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmmwt2.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmmwt2_u(long a, unsigned long b) { + return __rv_kmmwt2_u(a, b); +} + +// CHECK-RV32-LABEL: @kmsda( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmsda.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmsda(long t, unsigned long a, unsigned long b) { + return __rv_kmsda(t, a, b); +} + +// CHECK-RV32-LABEL: @kmsxda( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kmsxda.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long kmsxda(long t, unsigned long a, unsigned long b) { + return __rv_kmsxda(t, a, b); +} + +// CHECK-RV32-LABEL: @ksllw( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ksllw.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long ksllw(long a, unsigned int b) { + return __rv_ksllw(a, b); +} + +// CHECK-RV32-LABEL: @ksll8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ksll8.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long ksll8(unsigned long a, unsigned int b) { + 
+// CHECK-RV32-LABEL: @ksll8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ksll8.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ksll8(unsigned long a, unsigned int b) {
+  return __rv_ksll8(a, b);
+}
+
+// CHECK-RV32-LABEL: @ksll16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ksll16.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ksll16(unsigned long a, unsigned int b) {
+  return __rv_ksll16(a, b);
+}
+
+// CHECK-RV32-LABEL: @kslra8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kslra8.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kslra8(unsigned long a, int b) {
+  return __rv_kslra8(a, b);
+}
+
+// CHECK-RV32-LABEL: @kslra8_u(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kslra8.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kslra8_u(unsigned long a, int b) {
+  return __rv_kslra8_u(a, b);
+}
+
+// CHECK-RV32-LABEL: @kslra16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kslra16.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kslra16(unsigned long a, int b) {
+  return __rv_kslra16(a, b);
+}
+
+// CHECK-RV32-LABEL: @kslra16_u(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kslra16.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kslra16_u(unsigned long a, int b) {
+  return __rv_kslra16_u(a, b);
+}
+
+// CHECK-RV32-LABEL: @kslraw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kslraw.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long kslraw(int a, int b) {
+  return __rv_kslraw(a, b);
+}
+
+// CHECK-RV32-LABEL: @kslraw_u(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kslraw.u.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long kslraw_u(int a, int b) {
+  return __rv_kslraw_u(a, b);
+}
+
+// CHECK-RV32-LABEL: @kstas16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kstas16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kstas16(unsigned long a, unsigned long b) {
+  return __rv_kstas16(a, b);
+}
+
+// CHECK-RV32-LABEL: @kstsa16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kstsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long kstsa16(unsigned long a, unsigned long b) {
+  return __rv_kstsa16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ksub8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ksub8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ksub8(unsigned long a, unsigned long b) {
+  return __rv_ksub8(a, b);
+}
+
+// CHECK-RV32-LABEL: @ksub16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ksub16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ksub16(unsigned long a, unsigned long b) {
+  return __rv_ksub16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ksubh(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ksubh.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long ksubh(int a, int b) {
+  return __rv_ksubh(a, b);
+}
+
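__rv_kaddh and __rv_ksubh, tested above, keep intermediate sums inside the signed halfword range instead of wrapping, which is the usual way to guard sample arithmetic. A minimal sketch, assuming the saturating-halfword semantics of the P-extension proposal; mix_samples and its framing are illustrative:

short mix_samples(short a, short b, short c) {
  long s = __rv_kaddh(a, b);      /* saturates to [-32768, 32767] */
  return (short)__rv_ksubh(s, c); /* result stays in halfword range */
}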
+// CHECK-RV32-LABEL: @ksubw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ksubw.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long ksubw(int a, int b) {
+  return __rv_ksubw(a, b);
+}
+
+// CHECK-RV32-LABEL: @kwmmul(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kwmmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long kwmmul(long a, long b) {
+  return __rv_kwmmul(a, b);
+}
+
+// CHECK-RV32-LABEL: @kwmmul_u(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.kwmmul.u.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long kwmmul_u(long a, long b) {
+  return __rv_kwmmul_u(a, b);
+}
+
+// CHECK-RV32-LABEL: @maxw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.maxw.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long maxw(int a, int b) {
+  return __rv_maxw(a, b);
+}
+
+// CHECK-RV32-LABEL: @minw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.minw.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long minw(int a, int b) {
+  return __rv_minw(a, b);
+}
+
+// CHECK-RV32-LABEL: @pbsad(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.pbsad.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long pbsad(unsigned long a, unsigned long b) {
+  return __rv_pbsad(a, b);
+}
+
+// CHECK-RV32-LABEL: @pbsada(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.pbsada.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long pbsada(unsigned long t, unsigned long a, unsigned long b) {
+  return __rv_pbsada(t, a, b);
+}
+
+// CHECK-RV32-LABEL: @pkbb16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.pkbb16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long pkbb16(unsigned long a, unsigned long b) {
+  return __rv_pkbb16(a, b);
+}
+
+// CHECK-RV32-LABEL: @pkbt16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.pkbt16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long pkbt16(unsigned long a, unsigned long b) {
+  return __rv_pkbt16(a, b);
+}
+
+// CHECK-RV32-LABEL: @pktt16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.pktt16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long pktt16(unsigned long a, unsigned long b) {
+  return __rv_pktt16(a, b);
+}
+
+// CHECK-RV32-LABEL: @pktb16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.pktb16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long pktb16(unsigned long a, unsigned long b) {
+  return __rv_pktb16(a, b);
+}
+
+// CHECK-RV32-LABEL: @radd8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.radd8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long radd8(unsigned long a, unsigned long b) {
+  return __rv_radd8(a, b);
+}
+
+// CHECK-RV32-LABEL: @radd16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.radd16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long radd16(unsigned long a, unsigned long b) {
+  return __rv_radd16(a, b);
+}
+
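The halving-add builtins (radd16 above, and the radd8/uradd8 variants declared in BuiltinsRISCV.def) average packed elements without needing a wider intermediate type. A sketch that blends two rows of packed bytes with __rv_uradd8, assuming it computes the per-byte unsigned average (a+b)>>1 as in the P-extension proposal; the pixel-blend framing is an assumption for the example:

void blend_rows(unsigned long *dst, const unsigned long *a,
                const unsigned long *b, int n) {
  for (int i = 0; i < n; ++i)
    dst[i] = __rv_uradd8(a[i], b[i]); /* per-byte unsigned halving add */
}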
+// CHECK-RV32-LABEL: @raddw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.raddw.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long raddw(int a, int b) {
+  return __rv_raddw(a, b);
+}
+
+// CHECK-RV32-LABEL: @rcras16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.rcras16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long rcras16(unsigned long a, unsigned long b) {
+  return __rv_rcras16(a, b);
+}
+
+// CHECK-RV32-LABEL: @rcrsa16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.rcrsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long rcrsa16(unsigned long a, unsigned long b) {
+  return __rv_rcrsa16(a, b);
+}
+
+// CHECK-RV32-LABEL: @rstas16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.rstas16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long rstas16(unsigned long a, unsigned long b) {
+  return __rv_rstas16(a, b);
+}
+
+// CHECK-RV32-LABEL: @rstsa16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.rstsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long rstsa16(unsigned long a, unsigned long b) {
+  return __rv_rstsa16(a, b);
+}
+
+// CHECK-RV32-LABEL: @rsub8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.rsub8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long rsub8(unsigned long a, unsigned long b) {
+  return __rv_rsub8(a, b);
+}
+
+// CHECK-RV32-LABEL: @rsub16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.rsub16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long rsub16(unsigned long a, unsigned long b) {
+  return __rv_rsub16(a, b);
+}
+
+// CHECK-RV32-LABEL: @rsubw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.rsubw.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long rsubw(int a, int b) {
+  return __rv_rsubw(a, b);
+}
+
+// CHECK-RV32-LABEL: @sclip8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sclip8.i32.i32(i32 [[A:%.*]], i32 5)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long sclip8(unsigned long a) {
+  return __rv_sclip8(a, 5);
+}
+
+// CHECK-RV32-LABEL: @sclip16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sclip16.i32.i32(i32 [[A:%.*]], i32 6)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long sclip16(unsigned long a) {
+  return __rv_sclip16(a, 6);
+}
+
+// CHECK-RV32-LABEL: @sclip32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sclip32.i32.i32(i32 [[A:%.*]], i32 7)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+long sclip32(long a) {
+  return __rv_sclip32(a, 7);
+}
+
+// CHECK-RV32-LABEL: @scmple8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.scmple8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long scmple8(unsigned long a, unsigned long b) {
+  return __rv_scmple8(a, b);
+}
+
+//
CHECK-RV32-LABEL: @scmple16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.scmple16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long scmple16(unsigned long a, unsigned long b) { + return __rv_scmple16(a, b); +} + +// CHECK-RV32-LABEL: @scmplt8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.scmplt8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long scmplt8(unsigned long a, unsigned long b) { + return __rv_scmplt8(a, b); +} + +// CHECK-RV32-LABEL: @scmplt16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.scmplt16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long scmplt16(unsigned long a, unsigned long b) { + return __rv_scmplt16(a, b); +} + +// CHECK-RV32-LABEL: @sll8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sll8.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long sll8(unsigned long a, unsigned int b) { + return __rv_sll8(a, b); +} + +// CHECK-RV32-LABEL: @sll16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sll16.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long sll16(unsigned long a, unsigned int b) { + return __rv_sll16(a, b); +} + +// CHECK-RV32-LABEL: @smaqa( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smaqa.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smaqa(long t, unsigned long a, unsigned long b) { + return __rv_smaqa(t, a, b); +} + +// CHECK-RV32-LABEL: @smaqa_su( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smaqa.su.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smaqa_su(long t, unsigned long a, unsigned long b) { + return __rv_smaqa_su(t, a, b); +} + +// CHECK-RV32-LABEL: @smax8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smax8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long smax8(unsigned long a, unsigned long b) { + return __rv_smax8(a, b); +} + +// CHECK-RV32-LABEL: @smax16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smax16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long smax16(unsigned long a, unsigned long b) { + return __rv_smax16(a, b); +} + +// CHECK-RV32-LABEL: @smbb16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smbb16.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smbb16(unsigned long a, unsigned long b) { + return __rv_smbb16(a, b); +} + +// CHECK-RV32-LABEL: @smbt16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smbt16.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smbt16(unsigned long a, unsigned long b) { + return __rv_smbt16(a, b); +} + +// CHECK-RV32-LABEL: @smtt16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smtt16.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smtt16(unsigned long a, unsigned long b) { + return 
__rv_smtt16(a, b); +} + +// CHECK-RV32-LABEL: @smds( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smds.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smds(unsigned long a, unsigned long b) { + return __rv_smds(a, b); +} + +// CHECK-RV32-LABEL: @smdrs( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smdrs.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smdrs(unsigned long a, unsigned long b) { + return __rv_smdrs(a, b); +} + +// CHECK-RV32-LABEL: @smxds( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smxds.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smxds(unsigned long a, unsigned long b) { + return __rv_smxds(a, b); +} + +// CHECK-RV32-LABEL: @smin8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smin8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long smin8(unsigned long a, unsigned long b) { + return __rv_smin8(a, b); +} + +// CHECK-RV32-LABEL: @smin16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smin16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long smin16(unsigned long a, unsigned long b) { + return __rv_smin16(a, b); +} + +// CHECK-RV32-LABEL: @smmul( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smmul.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smmul(long a, long b) { + return __rv_smmul(a, b); +} + +// CHECK-RV32-LABEL: @smmul_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smmul.u.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smmul_u(long a, long b) { + return __rv_smmul_u(a, b); +} + +// CHECK-RV32-LABEL: @smmwb( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smmwb.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smmwb(long a, unsigned long b) { + return __rv_smmwb(a, b); +} + +// CHECK-RV32-LABEL: @smmwb_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smmwb.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smmwb_u(long a, unsigned long b) { + return __rv_smmwb_u(a, b); +} + +// CHECK-RV32-LABEL: @smmwt( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smmwt.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smmwt(long a, unsigned long b) { + return __rv_smmwt(a, b); +} + +// CHECK-RV32-LABEL: @smmwt_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.smmwt.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long smmwt_u(long a, unsigned long b) { + return __rv_smmwt_u(a, b); +} + +// CHECK-RV32-LABEL: @sra_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sra.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +long sra_u(long a, unsigned int b) { + return __rv_sra_u(a, b); +} + +// CHECK-RV32-LABEL: @sra8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sra8.i32.i32(i32 [[A:%.*]], i32 
[[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long sra8(unsigned long a, unsigned int b) { + return __rv_sra8(a, b); +} + +// CHECK-RV32-LABEL: @sra8_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sra8.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long sra8_u(unsigned long a, unsigned int b) { + return __rv_sra8_u(a, b); +} + +// CHECK-RV32-LABEL: @sra16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sra16.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long sra16(unsigned long a, unsigned int b) { + return __rv_sra16(a, b); +} + +// CHECK-RV32-LABEL: @sra16_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sra16.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long sra16_u(unsigned long a, unsigned int b) { + return __rv_sra16_u(a, b); +} + +// CHECK-RV32-LABEL: @srl8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.srl8.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long srl8(unsigned long a, unsigned int b) { + return __rv_srl8(a, b); +} + +// CHECK-RV32-LABEL: @srl8_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.srl8.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long srl8_u(unsigned long a, unsigned int b) { + return __rv_srl8_u(a, b); +} + +// CHECK-RV32-LABEL: @srl16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.srl16.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long srl16(unsigned long a, unsigned int b) { + return __rv_srl16(a, b); +} + +// CHECK-RV32-LABEL: @srl16_u( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.srl16.u.i32.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long srl16_u(unsigned long a, unsigned int b) { + return __rv_srl16_u(a, b); +} + +// CHECK-RV32-LABEL: @stas16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.stas16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long stas16(unsigned long a, unsigned long b) { + return __rv_stas16(a, b); +} + +// CHECK-RV32-LABEL: @stsa16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.stsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long stsa16(unsigned long a, unsigned long b) { + return __rv_stsa16(a, b); +} + +// CHECK-RV32-LABEL: @sub8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sub8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long sub8(unsigned long a, unsigned long b) { + return __rv_sub8(a, b); +} + +// CHECK-RV32-LABEL: @sub16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sub16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long sub16(unsigned long a, unsigned long b) { + return __rv_sub16(a, b); +} + +// CHECK-RV32-LABEL: @sunpkd810( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sunpkd810.i32(i32 [[A:%.*]]) +// 
CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long sunpkd810(unsigned long a) {
+ return __rv_sunpkd810(a);
+}
+
+// CHECK-RV32-LABEL: @sunpkd820(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sunpkd820.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long sunpkd820(unsigned long a) {
+ return __rv_sunpkd820(a);
+}
+
+// CHECK-RV32-LABEL: @sunpkd830(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sunpkd830.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long sunpkd830(unsigned long a) {
+ return __rv_sunpkd830(a);
+}
+
+// CHECK-RV32-LABEL: @sunpkd831(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sunpkd831.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long sunpkd831(unsigned long a) {
+ return __rv_sunpkd831(a);
+}
+
+// CHECK-RV32-LABEL: @sunpkd832(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.sunpkd832.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long sunpkd832(unsigned long a) {
+ return __rv_sunpkd832(a);
+}
+
+// CHECK-RV32-LABEL: @swap8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.swap8.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long swap8(unsigned long a) {
+ return __rv_swap8(a);
+}
+
+// CHECK-RV32-LABEL: @swap16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.swap16.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long swap16(unsigned long a) {
+ return __rv_swap16(a);
+}
+
+// CHECK-RV32-LABEL: @uclip8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uclip8.i32.i32(i32 [[A:%.*]], i32 5)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long uclip8(unsigned long a) {
+ return __rv_uclip8(a, 5);
+}
+
+// CHECK-RV32-LABEL: @uclip16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uclip16.i32.i32(i32 [[A:%.*]], i32 6)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long uclip16(unsigned long a) {
+ return __rv_uclip16(a, 6);
+}
+
+// CHECK-RV32-LABEL: @uclip32(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uclip32.i32.i32(i32 [[A:%.*]], i32 7)
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long uclip32(unsigned long a) {
+ return __rv_uclip32(a, 7);
+}
+
+// CHECK-RV32-LABEL: @ucmple8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ucmple8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ucmple8(unsigned long a, unsigned long b) {
+ return __rv_ucmple8(a, b);
+}
+
+// CHECK-RV32-LABEL: @ucmple16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ucmple16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ucmple16(unsigned long a, unsigned long b) {
+ return __rv_ucmple16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ucmplt8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ucmplt8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ucmplt8(unsigned long a, unsigned long b) {
+ return __rv_ucmplt8(a, b);
+}
+
+// CHECK-RV32-LABEL: @ucmplt16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32
@llvm.riscv.ucmplt16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ucmplt16(unsigned long a, unsigned long b) {
+ return __rv_ucmplt16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ukadd8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ukadd8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ukadd8(unsigned long a, unsigned long b) {
+ return __rv_ukadd8(a, b);
+}
+
+// CHECK-RV32-LABEL: @ukadd16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ukadd16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ukadd16(unsigned long a, unsigned long b) {
+ return __rv_ukadd16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ukaddh(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ukaddh.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ukaddh(unsigned int a, unsigned int b) {
+ return __rv_ukaddh(a, b);
+}
+
+// CHECK-RV32-LABEL: @ukaddw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ukaddw.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ukaddw(unsigned int a, unsigned int b) {
+ return __rv_ukaddw(a, b);
+}
+
+// CHECK-RV32-LABEL: @ukcras16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ukcras16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ukcras16(unsigned long a, unsigned long b) {
+ return __rv_ukcras16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ukcrsa16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ukcrsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ukcrsa16(unsigned long a, unsigned long b) {
+ return __rv_ukcrsa16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ukstas16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ukstas16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ukstas16(unsigned long a, unsigned long b) {
+ return __rv_ukstas16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ukstsa16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ukstsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ukstsa16(unsigned long a, unsigned long b) {
+ return __rv_ukstsa16(a, b);
+}
+
+// CHECK-RV32-LABEL: @uksub8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uksub8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long uksub8(unsigned long a, unsigned long b) {
+ return __rv_uksub8(a, b);
+}
+
+// CHECK-RV32-LABEL: @uksub16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uksub16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long uksub16(unsigned long a, unsigned long b) {
+ return __rv_uksub16(a, b);
+}
+
+// CHECK-RV32-LABEL: @uksubh(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uksubh.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long uksubh(unsigned int a, unsigned int b) {
+ return __rv_uksubh(a, b);
+}
+
+// CHECK-RV32-LABEL: @uksubw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uksubw.i32(i32
[[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long uksubw(unsigned int a, unsigned int b) { + return __rv_uksubw(a, b); +} + +// CHECK-RV32-LABEL: @umaqa( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.umaqa.i32.i32(i32 [[T:%.*]], i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long umaqa(unsigned long t, unsigned long a, unsigned long b) { + return __rv_umaqa(t, a, b); +} + +// CHECK-RV32-LABEL: @umax8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.umax8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long umax8(unsigned long a, unsigned long b) { + return __rv_umax8(a, b); +} + +// CHECK-RV32-LABEL: @umax16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.umax16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long umax16(unsigned long a, unsigned long b) { + return __rv_umax16(a, b); +} + +// CHECK-RV32-LABEL: @umin8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.umin8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long umin8(unsigned long a, unsigned long b) { + return __rv_umin8(a, b); +} + +// CHECK-RV32-LABEL: @umin16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.umin16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long umin16(unsigned long a, unsigned long b) { + return __rv_umin16(a, b); +} + +// CHECK-RV32-LABEL: @uradd8( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uradd8.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long uradd8(unsigned long a, unsigned long b) { + return __rv_uradd8(a, b); +} + +// CHECK-RV32-LABEL: @uradd16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uradd16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long uradd16(unsigned long a, unsigned long b) { + return __rv_uradd16(a, b); +} + +// CHECK-RV32-LABEL: @uraddw( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.uraddw.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long uraddw(unsigned int a, unsigned int b) { + return __rv_uraddw(a, b); +} + +// CHECK-RV32-LABEL: @urcras16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.urcras16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long urcras16(unsigned long a, unsigned long b) { + return __rv_urcras16(a, b); +} + +// CHECK-RV32-LABEL: @urcrsa16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.urcrsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long urcrsa16(unsigned long a, unsigned long b) { + return __rv_urcrsa16(a, b); +} + +// CHECK-RV32-LABEL: @urstas16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.urstas16.i32(i32 [[A:%.*]], i32 [[B:%.*]]) +// CHECK-RV32-NEXT: ret i32 [[TMP0]] +// +unsigned long urstas16(unsigned long a, unsigned long b) { + return __rv_urstas16(a, b); +} + +// CHECK-RV32-LABEL: @urstsa16( +// CHECK-RV32-NEXT: entry: +// CHECK-RV32-NEXT: [[TMP0:%.*]] = 
tail call i32 @llvm.riscv.urstsa16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long urstsa16(unsigned long a, unsigned long b) {
+ return __rv_urstsa16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ursub8(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ursub8.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ursub8(unsigned long a, unsigned long b) {
+ return __rv_ursub8(a, b);
+}
+
+// CHECK-RV32-LABEL: @ursub16(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ursub16.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ursub16(unsigned long a, unsigned long b) {
+ return __rv_ursub16(a, b);
+}
+
+// CHECK-RV32-LABEL: @ursubw(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.ursubw.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long ursubw(unsigned int a, unsigned int b) {
+ return __rv_ursubw(a, b);
+}
+
+// CHECK-RV32-LABEL: @zunpkd810(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.zunpkd810.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long zunpkd810(unsigned long a) {
+ return __rv_zunpkd810(a);
+}
+
+// CHECK-RV32-LABEL: @zunpkd820(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.zunpkd820.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long zunpkd820(unsigned long a) {
+ return __rv_zunpkd820(a);
+}
+
+// CHECK-RV32-LABEL: @zunpkd830(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.zunpkd830.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long zunpkd830(unsigned long a) {
+ return __rv_zunpkd830(a);
+}
+
+// CHECK-RV32-LABEL: @zunpkd831(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.zunpkd831.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long zunpkd831(unsigned long a) {
+ return __rv_zunpkd831(a);
+}
+
+// CHECK-RV32-LABEL: @zunpkd832(
+// CHECK-RV32-NEXT: entry:
+// CHECK-RV32-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.riscv.zunpkd832.i32(i32 [[A:%.*]])
+// CHECK-RV32-NEXT: ret i32 [[TMP0]]
+//
+unsigned long zunpkd832(unsigned long a) {
+ return __rv_zunpkd832(a);
+}
diff --git a/clang/test/CodeGen/RISCV/rvp-intrinsics/rv64.c b/clang/test/CodeGen/RISCV/rvp-intrinsics/rv64.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvp-intrinsics/rv64.c
@@ -0,0 +1,1770 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -O2 -target-feature +experimental-p \
+// RUN: -emit-llvm %s -o - | FileCheck --check-prefix=CHECK-RV64 %s
+
+typedef signed char int8x4_t __attribute__((vector_size(4)));
+typedef signed char int8x8_t __attribute__((vector_size(8)));
+typedef short int16x2_t __attribute__((vector_size(4)));
+typedef short int16x4_t __attribute__((vector_size(8)));
+typedef short int16x8_t __attribute__((vector_size(16)));
+typedef int int32x2_t __attribute__((vector_size(8)));
+typedef int int32x4_t __attribute__((vector_size(16)));
+typedef unsigned char uint8x4_t __attribute__((vector_size(4)));
+typedef unsigned char uint8x8_t __attribute__((vector_size(8)));
+typedef unsigned short uint16x2_t __attribute__((vector_size(4)));
+typedef unsigned
short uint16x4_t __attribute__((vector_size(8))); +typedef unsigned short uint16x8_t __attribute__((vector_size(16))); +typedef unsigned int uint32x2_t __attribute__((vector_size(8))); +typedef unsigned int uint32x4_t __attribute__((vector_size(16))); + +// CHECK-RV64-LABEL: @add8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.add8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long add8(unsigned long a, unsigned long b) { + return __rv_add8(a, b); +} + +// CHECK-RV64-LABEL: @add16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.add16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long add16(unsigned long a, unsigned long b) { + return __rv_add16(a, b); +} + +// CHECK-RV64-LABEL: @ave( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ave.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long ave(long a, long b) { + return __rv_ave(a, b); +} + +// CHECK-RV64-LABEL: @bitrev( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.bitrev.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long bitrev(unsigned long a, unsigned long b) { + return __rv_bitrev(a, b); +} + +// CHECK-RV64-LABEL: @bpick( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.bpick.i64(i64 [[A:%.*]], i64 [[B:%.*]], i64 [[C:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long bpick(unsigned long a, unsigned long b, unsigned long c) { + return __rv_bpick(a, b, c); +} + +// CHECK-RV64-LABEL: @clrs8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.clrs8.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long clrs8(unsigned long a) { + return __rv_clrs8(a); +} + +// CHECK-RV64-LABEL: @clrs16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.clrs16.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long clrs16(unsigned long a) { + return __rv_clrs16(a); +} + +// CHECK-RV64-LABEL: @clrs32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.clrs32.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long clrs32(unsigned long a) { + return __rv_clrs32(a); +} + +// CHECK-RV64-LABEL: @clz8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.clz8.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long clz8(unsigned long a) { + return __rv_clz8(a); +} + +// CHECK-RV64-LABEL: @clz16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.clz16.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long clz16(unsigned long a) { + return __rv_clz16(a); +} + +// CHECK-RV64-LABEL: @clz32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.clz32.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long clz32(unsigned long a) { + return __rv_clz32(a); +} + +// CHECK-RV64-LABEL: @cmpeq8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.cmpeq8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long cmpeq8(unsigned long a, unsigned long b) { + return __rv_cmpeq8(a, b); +} + +// 
CHECK-RV64-LABEL: @cmpeq16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.cmpeq16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long cmpeq16(unsigned long a, unsigned long b) { + return __rv_cmpeq16(a, b); +} + +// CHECK-RV64-LABEL: @cras16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.cras16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long cras16(unsigned long a, unsigned long b) { + return __rv_cras16(a, b); +} + +// CHECK-RV64-LABEL: @crsa16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.crsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long crsa16(unsigned long a, unsigned long b) { + return __rv_crsa16(a, b); +} + +// CHECK-RV64-LABEL: @insb( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.insb.i64(i64 [[A:%.*]], i64 [[B:%.*]], i64 5) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long insb(unsigned long a, unsigned long b) { + return __rv_insb(a, b, 5); +} + +// CHECK-RV64-LABEL: @kabs8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kabs8.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kabs8(unsigned long a) { + return __rv_kabs8(a); +} + +// CHECK-RV64-LABEL: @kabs16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kabs16.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kabs16(unsigned long a) { + return __rv_kabs16(a); +} + +// CHECK-RV64-LABEL: @kabsw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kabsw.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kabsw(long a) { + return __rv_kabsw(a); +} + +// CHECK-RV64-LABEL: @kadd8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kadd8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kadd8(unsigned long a, unsigned long b) { + return __rv_kadd8(a, b); +} + +// CHECK-RV64-LABEL: @kadd16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kadd16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kadd16(unsigned long a, unsigned long b) { + return __rv_kadd16(a, b); +} + +// CHECK-RV64-LABEL: @kaddh( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kaddh.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kaddh(int a, int b) { + return __rv_kaddh(a, b); +} + +// CHECK-RV64-LABEL: @kaddw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kaddw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kaddw(int a, int b) { + return __rv_kaddw(a, b); +} + +// CHECK-RV64-LABEL: @kcras16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kcras16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kcras16(unsigned long a, unsigned long 
b) { + return __rv_kcras16(a, b); +} + +// CHECK-RV64-LABEL: @kcrsa16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kcrsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kcrsa16(unsigned long a, unsigned long b) { + return __rv_kcrsa16(a, b); +} + +// CHECK-RV64-LABEL: @kdmbb( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kdmbb.i64.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kdmbb(unsigned int a, unsigned int b) { + return __rv_kdmbb(a, b); +} + +// CHECK-RV64-LABEL: @kdmbt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kdmbt.i64.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kdmbt(unsigned int a, unsigned int b) { + return __rv_kdmbt(a, b); +} + +// CHECK-RV64-LABEL: @kdmtt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kdmtt.i64.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kdmtt(unsigned int a, unsigned int b) { + return __rv_kdmtt(a, b); +} + +// CHECK-RV64-LABEL: @kdmabb( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kdmabb.i64.i64(i64 [[T:%.*]], i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kdmabb(long t, unsigned int a, unsigned int b) { + return __rv_kdmabb(t, a, b); +} + +// CHECK-RV64-LABEL: @kdmabt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kdmabt.i64.i64(i64 [[T:%.*]], i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kdmabt(long t, unsigned int a, unsigned int b) { + return __rv_kdmabt(t, a, b); +} + +// CHECK-RV64-LABEL: @kdmatt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kdmatt.i64.i64(i64 [[T:%.*]], i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kdmatt(long t, unsigned int a, unsigned int b) { + return __rv_kdmatt(t, a, b); +} + +// CHECK-RV64-LABEL: @khm8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.khm8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long khm8(unsigned long a, unsigned long b) { + return __rv_khm8(a, b); +} + +// CHECK-RV64-LABEL: @khmx8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.khmx8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long khmx8(unsigned long a, unsigned long b) { + return __rv_khmx8(a, b); +} + +// CHECK-RV64-LABEL: @khm16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail 
call i64 @llvm.riscv.khm16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long khm16(unsigned long a, unsigned long b) { + return __rv_khm16(a, b); +} + +// CHECK-RV64-LABEL: @khmx16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.khmx16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long khmx16(unsigned long a, unsigned long b) { + return __rv_khmx16(a, b); +} + +// CHECK-RV64-LABEL: @khmbb( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.khmbb.i64.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long khmbb(unsigned int a, unsigned int b) { + return __rv_khmbb(a, b); +} + +// CHECK-RV64-LABEL: @khmbt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.khmbt.i64.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long khmbt(unsigned int a, unsigned int b) { + return __rv_khmbt(a, b); +} + +// CHECK-RV64-LABEL: @khmtt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.khmtt.i64.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long khmtt(unsigned int a, unsigned int b) { + return __rv_khmtt(a, b); +} + +// CHECK-RV64-LABEL: @kmabb( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmabb.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmabb(long t, unsigned long a, unsigned long b) { + return __rv_kmabb(t, a, b); +} + +// CHECK-RV64-LABEL: @kmabt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmabt.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmabt(long t, unsigned long a, unsigned long b) { + return __rv_kmabt(t, a, b); +} + +// CHECK-RV64-LABEL: @kmatt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmatt.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmatt(long t, unsigned long a, unsigned long b) { + return __rv_kmatt(t, a, b); +} + +// CHECK-RV64-LABEL: @kmada( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmada.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmada(long t, unsigned long a, unsigned long b) { + return __rv_kmada(t, a, b); +} + +// CHECK-RV64-LABEL: @kmaxda( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmaxda.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmaxda(long t, unsigned long a, unsigned long b) { + return __rv_kmaxda(t, a, b); +} + +// CHECK-RV64-LABEL: @kmads( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmads.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmads(long t, unsigned long a, unsigned long b) { + 
return __rv_kmads(t, a, b); +} + +// CHECK-RV64-LABEL: @kmadrs( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmadrs.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmadrs(long t, unsigned long a, unsigned long b) { + return __rv_kmadrs(t, a, b); +} + +// CHECK-RV64-LABEL: @kmaxds( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmaxds.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmaxds(long t, unsigned long a, unsigned long b) { + return __rv_kmaxds(t, a, b); +} + +// CHECK-RV64-LABEL: @kmda( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmda.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmda(unsigned long a, unsigned long b) { + return __rv_kmda(a, b); +} + +// CHECK-RV64-LABEL: @kmxda( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmxda.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmxda(unsigned long a, unsigned long b) { + return __rv_kmxda(a, b); +} + +// CHECK-RV64-LABEL: @kmmac( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmac.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmac(long t, long a, long b) { + return __rv_kmmac(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmac_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmac.u.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmac_u(long t, long a, long b) { + return __rv_kmmac_u(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmawb( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmawb.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmawb(long t, unsigned long a, unsigned long b) { + return __rv_kmmawb(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmawb_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmawb.u.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmawb_u(long t, unsigned long a, unsigned long b) { + return __rv_kmmawb_u(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmawb2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmawb2.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmawb2(long t, unsigned long a, unsigned long b) { + return __rv_kmmawb2(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmawb2_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmawb2.u.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmawb2_u(long t, unsigned long a, unsigned long b) { + return __rv_kmmawb2_u(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmawt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmawt.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmawt(long t, unsigned long a, unsigned long b) { + return __rv_kmmawt(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmawt_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 
@llvm.riscv.kmmawt.u.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmawt_u(long t, unsigned long a, unsigned long b) { + return __rv_kmmawt_u(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmawt2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmawt2.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmawt2(long t, unsigned long a, unsigned long b) { + return __rv_kmmawt2(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmawt2_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmawt2.u.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmawt2_u(long t, unsigned long a, unsigned long b) { + return __rv_kmmawt2_u(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmsb( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmsb.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmsb(long t, long a, long b) { + return __rv_kmmsb(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmsb_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmsb.u.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmsb_u(long t, long a, long b) { + return __rv_kmmsb_u(t, a, b); +} + +// CHECK-RV64-LABEL: @kmmwb2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmwb2.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmwb2(long a, unsigned long b) { + return __rv_kmmwb2(a, b); +} + +// CHECK-RV64-LABEL: @kmmwb2_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmwb2.u.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmwb2_u(long a, unsigned long b) { + return __rv_kmmwb2_u(a, b); +} + +// CHECK-RV64-LABEL: @kmmwt2( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmwt2.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmwt2(long a, unsigned long b) { + return __rv_kmmwt2(a, b); +} + +// CHECK-RV64-LABEL: @kmmwt2_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmmwt2.u.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmmwt2_u(long a, unsigned long b) { + return __rv_kmmwt2_u(a, b); +} + +// CHECK-RV64-LABEL: @kmsda( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmsda.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmsda(long t, unsigned long a, unsigned long b) { + return __rv_kmsda(t, a, b); +} + +// CHECK-RV64-LABEL: @kmsxda( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kmsxda.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kmsxda(long t, unsigned long a, unsigned long b) { + return __rv_kmsxda(t, a, b); +} + +// CHECK-RV64-LABEL: @ksllw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ksllw.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long ksllw(long a, unsigned int b) { + return __rv_ksllw(a, b); +} + +// 
CHECK-RV64-LABEL: @ksll8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ksll8.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ksll8(unsigned long a, unsigned int b) { + return __rv_ksll8(a, b); +} + +// CHECK-RV64-LABEL: @ksll16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ksll16.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ksll16(unsigned long a, unsigned int b) { + return __rv_ksll16(a, b); +} + +// CHECK-RV64-LABEL: @kslra8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kslra8.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kslra8(unsigned long a, int b) { + return __rv_kslra8(a, b); +} + +// CHECK-RV64-LABEL: @kslra8_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kslra8.u.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kslra8_u(unsigned long a, int b) { + return __rv_kslra8_u(a, b); +} + +// CHECK-RV64-LABEL: @kslra16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kslra16.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kslra16(unsigned long a, int b) { + return __rv_kslra16(a, b); +} + +// CHECK-RV64-LABEL: @kslra16_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kslra16.u.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kslra16_u(unsigned long a, int b) { + return __rv_kslra16_u(a, b); +} + +// CHECK-RV64-LABEL: @kslraw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kslraw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kslraw(int a, int b) { + return __rv_kslraw(a, b); +} + +// CHECK-RV64-LABEL: @kslraw_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kslraw.u.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kslraw_u(int a, int b) { + return __rv_kslraw_u(a, b); +} + +// CHECK-RV64-LABEL: @kstas16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kstas16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kstas16(unsigned long a, unsigned long b) { + return __rv_kstas16(a, b); +} + +// CHECK-RV64-LABEL: @kstsa16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kstsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long kstsa16(unsigned long a, unsigned long b) { + return __rv_kstsa16(a, b); +} + +// 
CHECK-RV64-LABEL: @ksub8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ksub8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ksub8(unsigned long a, unsigned long b) { + return __rv_ksub8(a, b); +} + +// CHECK-RV64-LABEL: @ksub16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ksub16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ksub16(unsigned long a, unsigned long b) { + return __rv_ksub16(a, b); +} + +// CHECK-RV64-LABEL: @ksubh( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ksubh.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long ksubh(int a, int b) { + return __rv_ksubh(a, b); +} + +// CHECK-RV64-LABEL: @ksubw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ksubw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long ksubw(int a, int b) { + return __rv_ksubw(a, b); +} + +// CHECK-RV64-LABEL: @kwmmul( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kwmmul.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kwmmul(long a, long b) { + return __rv_kwmmul(a, b); +} + +// CHECK-RV64-LABEL: @kwmmul_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.kwmmul.u.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long kwmmul_u(long a, long b) { + return __rv_kwmmul_u(a, b); +} + +// CHECK-RV64-LABEL: @maxw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.maxw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long maxw(int a, int b) { + return __rv_maxw(a, b); +} + +// CHECK-RV64-LABEL: @minw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.minw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long minw(int a, int b) { + return __rv_minw(a, b); +} + +// CHECK-RV64-LABEL: @pbsad( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.pbsad.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long pbsad(unsigned long a, unsigned long b) { + return __rv_pbsad(a, b); +} + +// CHECK-RV64-LABEL: @pbsada( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.pbsada.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long pbsada(unsigned long t, unsigned long a, unsigned long b) { + return __rv_pbsada(t, a, b); +} + +// CHECK-RV64-LABEL: @pkbb16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.pkbb16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long pkbb16(unsigned long a, unsigned long b) { + 
return __rv_pkbb16(a, b);
+}
+
+// CHECK-RV64-LABEL: @pkbt16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.pkbt16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long pkbt16(unsigned long a, unsigned long b) {
+ return __rv_pkbt16(a, b);
+}
+
+// CHECK-RV64-LABEL: @pktt16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.pktt16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long pktt16(unsigned long a, unsigned long b) {
+ return __rv_pktt16(a, b);
+}
+
+// CHECK-RV64-LABEL: @pktb16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.pktb16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long pktb16(unsigned long a, unsigned long b) {
+ return __rv_pktb16(a, b);
+}
+
+// CHECK-RV64-LABEL: @radd8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.radd8.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long radd8(unsigned long a, unsigned long b) {
+ return __rv_radd8(a, b);
+}
+
+// CHECK-RV64-LABEL: @radd16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.radd16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long radd16(unsigned long a, unsigned long b) {
+ return __rv_radd16(a, b);
+}
+
+// CHECK-RV64-LABEL: @raddw(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64
+// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.raddw.i64(i64 [[CONV]], i64 [[CONV1]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+long raddw(int a, int b) {
+ return __rv_raddw(a, b);
+}
+
+// CHECK-RV64-LABEL: @rcras16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.rcras16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long rcras16(unsigned long a, unsigned long b) {
+ return __rv_rcras16(a, b);
+}
+
+// CHECK-RV64-LABEL: @rcrsa16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.rcrsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long rcrsa16(unsigned long a, unsigned long b) {
+ return __rv_rcrsa16(a, b);
+}
+
+// CHECK-RV64-LABEL: @rstas16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.rstas16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long rstas16(unsigned long a, unsigned long b) {
+ return __rv_rstas16(a, b);
+}
+
+// CHECK-RV64-LABEL: @rstsa16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.rstsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long rstsa16(unsigned long a, unsigned long b) {
+ return __rv_rstsa16(a, b);
+}
+
+// CHECK-RV64-LABEL: @rsub8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.rsub8.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long rsub8(unsigned long a, unsigned long b) {
+ return __rv_rsub8(a, b);
+}
+
+// CHECK-RV64-LABEL: @rsub16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.rsub16.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+//
CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long rsub16(unsigned long a, unsigned long b) { + return __rv_rsub16(a, b); +} + +// CHECK-RV64-LABEL: @rsubw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.rsubw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long rsubw(int a, int b) { + return __rv_rsubw(a, b); +} + +// CHECK-RV64-LABEL: @sclip8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sclip8.i64.i64(i64 [[A:%.*]], i64 7) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sclip8(unsigned long a) { + return __rv_sclip8(a, 7); +} + +// CHECK-RV64-LABEL: @sclip16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sclip16.i64.i64(i64 [[A:%.*]], i64 8) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sclip16(unsigned long a) { + return __rv_sclip16(a, 8); +} + +// CHECK-RV64-LABEL: @sclip32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sclip32.i64.i64(i64 [[A:%.*]], i64 9) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long sclip32(long a) { + return __rv_sclip32(a, 9); +} + +// CHECK-RV64-LABEL: @scmple8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.scmple8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long scmple8(unsigned long a, unsigned long b) { + return __rv_scmple8(a, b); +} + +// CHECK-RV64-LABEL: @scmple16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.scmple16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long scmple16(unsigned long a, unsigned long b) { + return __rv_scmple16(a, b); +} + +// CHECK-RV64-LABEL: @scmplt8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.scmplt8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long scmplt8(unsigned long a, unsigned long b) { + return __rv_scmplt8(a, b); +} + +// CHECK-RV64-LABEL: @scmplt16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.scmplt16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long scmplt16(unsigned long a, unsigned long b) { + return __rv_scmplt16(a, b); +} + +// CHECK-RV64-LABEL: @sll8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sll8.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sll8(unsigned long a, unsigned int b) { + return __rv_sll8(a, b); +} + +// CHECK-RV64-LABEL: @sll16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sll16.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sll16(unsigned long a, unsigned int b) { + return __rv_sll16(a, b); +} + +// CHECK-RV64-LABEL: @smaqa( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smaqa.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smaqa(long t, unsigned long a, unsigned long b) { + return __rv_smaqa(t, a, b); +} + +// 
CHECK-RV64-LABEL: @smaqa_su( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smaqa.su.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smaqa_su(long t, unsigned long a, unsigned long b) { + return __rv_smaqa_su(t, a, b); +} + +// CHECK-RV64-LABEL: @smax8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smax8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long smax8(unsigned long a, unsigned long b) { + return __rv_smax8(a, b); +} + +// CHECK-RV64-LABEL: @smax16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smax16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long smax16(unsigned long a, unsigned long b) { + return __rv_smax16(a, b); +} + +// CHECK-RV64-LABEL: @smbb16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smbb16.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smbb16(unsigned long a, unsigned long b) { + return __rv_smbb16(a, b); +} + +// CHECK-RV64-LABEL: @smbt16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smbt16.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smbt16(unsigned long a, unsigned long b) { + return __rv_smbt16(a, b); +} + +// CHECK-RV64-LABEL: @smtt16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smtt16.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smtt16(unsigned long a, unsigned long b) { + return __rv_smtt16(a, b); +} + +// CHECK-RV64-LABEL: @smds( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smds.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smds(unsigned long a, unsigned long b) { + return __rv_smds(a, b); +} + +// CHECK-RV64-LABEL: @smdrs( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smdrs.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smdrs(unsigned long a, unsigned long b) { + return __rv_smdrs(a, b); +} + +// CHECK-RV64-LABEL: @smxds( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smxds.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smxds(unsigned long a, unsigned long b) { + return __rv_smxds(a, b); +} + +// CHECK-RV64-LABEL: @smin8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smin8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long smin8(unsigned long a, unsigned long b) { + return __rv_smin8(a, b); +} + +// CHECK-RV64-LABEL: @smin16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smin16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long smin16(unsigned long a, unsigned long b) { + return __rv_smin16(a, b); +} + +// CHECK-RV64-LABEL: @smmul( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smmul.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smmul(long a, long b) { + return __rv_smmul(a, b); +} + +// CHECK-RV64-LABEL: @smmul_u( +// CHECK-RV64-NEXT: entry: +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smmul.u.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smmul_u(long a, long b) { + return __rv_smmul_u(a, b); +} + +// CHECK-RV64-LABEL: @smmwb( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smmwb.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smmwb(long a, long b) { + return __rv_smmwb(a, b); +} + +// CHECK-RV64-LABEL: @smmwb_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smmwb.u.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smmwb_u(long a, long b) { + return __rv_smmwb_u(a, b); +} + +// CHECK-RV64-LABEL: @smmwt( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smmwt.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smmwt(long a, long b) { + return __rv_smmwt(a, b); +} + +// CHECK-RV64-LABEL: @smmwt_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.smmwt.u.i64.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long smmwt_u(long a, long b) { + return __rv_smmwt_u(a, b); +} + +// CHECK-RV64-LABEL: @sra_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sra.u.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long sra_u(long a, unsigned int b) { + return __rv_sra_u(a, b); +} + +// CHECK-RV64-LABEL: @sra8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sra8.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sra8(unsigned long a, unsigned int b) { + return __rv_sra8(a, b); +} + +// CHECK-RV64-LABEL: @sra8_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sra8.u.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sra8_u(unsigned long a, unsigned int b) { + return __rv_sra8_u(a, b); +} + +// CHECK-RV64-LABEL: @sra16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sra16.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sra16(unsigned long a, unsigned int b) { + return __rv_sra16(a, b); +} + +// CHECK-RV64-LABEL: @sra16_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sra16.u.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sra16_u(unsigned long a, unsigned int b) { + return __rv_sra16_u(a, b); +} + +// CHECK-RV64-LABEL: @srl8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.srl8.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long srl8(unsigned long a, unsigned int b) { + return __rv_srl8(a, b); +} + +// CHECK-RV64-LABEL: @srl8_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = tail call i64 @llvm.riscv.srl8.u.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long srl8_u(unsigned long a, unsigned int b) { + return __rv_srl8_u(a, b); +} + +// CHECK-RV64-LABEL: @srl16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.srl16.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long srl16(unsigned long a, unsigned int b) { + return __rv_srl16(a, b); +} + +// CHECK-RV64-LABEL: @srl16_u( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.srl16.u.i64.i64(i64 [[A:%.*]], i64 [[CONV]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long srl16_u(unsigned long a, unsigned int b) { + return __rv_srl16_u(a, b); +} + +// CHECK-RV64-LABEL: @stas16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.stas16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long stas16(unsigned long a, unsigned long b) { + return __rv_stas16(a, b); +} + +// CHECK-RV64-LABEL: @stsa16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.stsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long stsa16(unsigned long a, unsigned long b) { + return __rv_stsa16(a, b); +} + +// CHECK-RV64-LABEL: @sub8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sub8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sub8(unsigned long a, unsigned long b) { + return __rv_sub8(a, b); +} + +// CHECK-RV64-LABEL: @sub16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sub16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sub16(unsigned long a, unsigned long b) { + return __rv_sub16(a, b); +} + +// CHECK-RV64-LABEL: @sunpkd810( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sunpkd810.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sunpkd810(unsigned long a) { + return __rv_sunpkd810(a); +} + +// CHECK-RV64-LABEL: @sunpkd820( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sunpkd820.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sunpkd820(unsigned long a) { + return __rv_sunpkd820(a); +} + +// CHECK-RV64-LABEL: @sunpkd830( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sunpkd830.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sunpkd830(unsigned long a) { + return __rv_sunpkd830(a); +} + +// CHECK-RV64-LABEL: @sunpkd831( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sunpkd831.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sunpkd831(unsigned long a) { + return __rv_sunpkd831(a); +} + +// CHECK-RV64-LABEL: @sunpkd832( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.sunpkd832.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long sunpkd832(unsigned long a) { + return __rv_sunpkd832(a); +} + +// CHECK-RV64-LABEL: @swap8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= tail call i64 @llvm.riscv.swap8.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long swap8(unsigned long a) { + return __rv_swap8(a); +} + +// CHECK-RV64-LABEL: @swap16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.swap16.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long swap16(unsigned long a) { + return __rv_swap16(a); +} + +// CHECK-RV64-LABEL: @uclip8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uclip8.i64.i64(i64 [[A:%.*]], i64 7) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uclip8(unsigned long a) { + return __rv_uclip8(a, 7); +} + +// CHECK-RV64-LABEL: @uclip16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uclip16.i64.i64(i64 [[A:%.*]], i64 8) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uclip16(unsigned long a) { + return __rv_uclip16(a, 8); +} + +// CHECK-RV64-LABEL: @uclip32( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uclip32.i64.i64(i64 [[A:%.*]], i64 9) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long uclip32(long a) { + return __rv_uclip32(a, 9); +} + +// CHECK-RV64-LABEL: @ucmple8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ucmple8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ucmple8(unsigned long a, unsigned long b) { + return __rv_ucmple8(a, b); +} + +// CHECK-RV64-LABEL: @ucmple16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ucmple16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ucmple16(unsigned long a, unsigned long b) { + return __rv_ucmple16(a, b); +} + +// CHECK-RV64-LABEL: @ucmplt8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ucmplt8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ucmplt8(unsigned long a, unsigned long b) { + return __rv_ucmplt8(a, b); +} + +// CHECK-RV64-LABEL: @ucmplt16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ucmplt16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ucmplt16(unsigned long a, unsigned long b) { + return __rv_ucmplt16(a, b); +} + +// CHECK-RV64-LABEL: @ukadd8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ukadd8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ukadd8(unsigned long a, unsigned long b) { + return __rv_ukadd8(a, b); +} + +// CHECK-RV64-LABEL: @ukadd16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ukadd16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ukadd16(unsigned long a, unsigned long b) { + return __rv_ukadd16(a, b); +} + +// CHECK-RV64-LABEL: @ukaddh( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ukaddh.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long ukaddh(int a, int b) { + return __rv_ukaddh(a, b); +} + +// CHECK-RV64-LABEL: @ukaddw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = sext i32 [[A:%.*]] to i64 +// 
CHECK-RV64-NEXT: [[CONV1:%.*]] = sext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ukaddw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +long ukaddw(int a, int b) { + return __rv_ukaddw(a, b); +} + +// CHECK-RV64-LABEL: @ukcras16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ukcras16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ukcras16(unsigned long a, unsigned long b) { + return __rv_ukcras16(a, b); +} + +// CHECK-RV64-LABEL: @ukcrsa16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ukcrsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ukcrsa16(unsigned long a, unsigned long b) { + return __rv_ukcrsa16(a, b); +} + +// CHECK-RV64-LABEL: @ukstas16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ukstas16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ukstas16(unsigned long a, unsigned long b) { + return __rv_ukstas16(a, b); +} + +// CHECK-RV64-LABEL: @ukstsa16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ukstsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ukstsa16(unsigned long a, unsigned long b) { + return __rv_ukstsa16(a, b); +} + +// CHECK-RV64-LABEL: @uksub8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uksub8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uksub8(unsigned long a, unsigned long b) { + return __rv_uksub8(a, b); +} + +// CHECK-RV64-LABEL: @uksub16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uksub16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uksub16(unsigned long a, unsigned long b) { + return __rv_uksub16(a, b); +} + +// CHECK-RV64-LABEL: @uksubh( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uksubh.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uksubh(unsigned int a, unsigned int b) { + return __rv_uksubh(a, b); +} + +// CHECK-RV64-LABEL: @uksubw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uksubw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uksubw(unsigned int a, unsigned int b) { + return __rv_uksubw(a, b); +} + +// CHECK-RV64-LABEL: @umaqa( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.umaqa.i64.i64(i64 [[T:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long umaqa(unsigned long t, unsigned long a, unsigned long b) { + return __rv_umaqa(t, a, b); +} + +// CHECK-RV64-LABEL: @umax8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.umax8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long umax8(unsigned long a, unsigned long b) { + return __rv_umax8(a, b); +} + +// CHECK-RV64-LABEL: 
@umax16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.umax16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long umax16(unsigned long a, unsigned long b) { + return __rv_umax16(a, b); +} + +// CHECK-RV64-LABEL: @umin8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.umin8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long umin8(unsigned long a, unsigned long b) { + return __rv_umin8(a, b); +} + +// CHECK-RV64-LABEL: @umin16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.umin16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long umin16(unsigned long a, unsigned long b) { + return __rv_umin16(a, b); +} + +// CHECK-RV64-LABEL: @uradd8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uradd8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uradd8(unsigned long a, unsigned long b) { + return __rv_uradd8(a, b); +} + +// CHECK-RV64-LABEL: @uradd16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uradd16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uradd16(unsigned long a, unsigned long b) { + return __rv_uradd16(a, b); +} + +// CHECK-RV64-LABEL: @uraddw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.uraddw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long uraddw(unsigned int a, unsigned int b) { + return __rv_uraddw(a, b); +} + +// CHECK-RV64-LABEL: @urcras16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.urcras16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long urcras16(unsigned long a, unsigned long b) { + return __rv_urcras16(a, b); +} + +// CHECK-RV64-LABEL: @urcrsa16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.urcrsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long urcrsa16(unsigned long a, unsigned long b) { + return __rv_urcrsa16(a, b); +} + +// CHECK-RV64-LABEL: @urstas16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.urstas16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long urstas16(unsigned long a, unsigned long b) { + return __rv_urstas16(a, b); +} + +// CHECK-RV64-LABEL: @urstsa16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.urstsa16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long urstsa16(unsigned long a, unsigned long b) { + return __rv_urstsa16(a, b); +} + +// CHECK-RV64-LABEL: @ursub8( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ursub8.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ursub8(unsigned long a, unsigned long b) { + return __rv_ursub8(a, b); +} + +// CHECK-RV64-LABEL: @ursub16( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ursub16.i64(i64 [[A:%.*]], i64 [[B:%.*]]) +// 
CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ursub16(unsigned long a, unsigned long b) { + return __rv_ursub16(a, b); +} + +// CHECK-RV64-LABEL: @ursubw( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[CONV:%.*]] = zext i32 [[A:%.*]] to i64 +// CHECK-RV64-NEXT: [[CONV1:%.*]] = zext i32 [[B:%.*]] to i64 +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.ursubw.i64(i64 [[CONV]], i64 [[CONV1]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long ursubw(unsigned int a, unsigned int b) { + return __rv_ursubw(a, b); +} + +// CHECK-RV64-LABEL: @zunpkd810( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.zunpkd810.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long zunpkd810(unsigned long a) { + return __rv_zunpkd810(a); +} + +// CHECK-RV64-LABEL: @zunpkd820( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.zunpkd820.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long zunpkd820(unsigned long a) { + return __rv_zunpkd820(a); +} + +// CHECK-RV64-LABEL: @zunpkd830( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.zunpkd830.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long zunpkd830(unsigned long a) { + return __rv_zunpkd830(a); +} + +// CHECK-RV64-LABEL: @zunpkd831( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.zunpkd831.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long zunpkd831(unsigned long a) { + return __rv_zunpkd831(a); +} + +// CHECK-RV64-LABEL: @zunpkd832( +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.riscv.zunpkd832.i64(i64 [[A:%.*]]) +// CHECK-RV64-NEXT: ret i64 [[TMP0]] +// +unsigned long zunpkd832(unsigned long a) { + return __rv_zunpkd832(a); +} diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -1610,3 +1610,265 @@ def int_riscv_sm3p0 : ScalarCryptoGprIntrinsicAny; def int_riscv_sm3p1 : ScalarCryptoGprIntrinsicAny; } // TargetPrefix = "riscv" + +//===----------------------------------------------------------------------===// +// Packed SIMD + +let TargetPrefix = "riscv" in { + class RVPUnaryIntrinsics + : Intrinsic<[llvm_any_ty], + [LLVMMatchType<0>], + [IntrNoMem]>; + + multiclass RVPUnaryIntrinsics { + def "int_riscv_" # NAME : RVPUnaryIntrinsics; + } + + defm clrs8 : RVPUnaryIntrinsics; + defm clrs16 : RVPUnaryIntrinsics; + defm clrs32 : RVPUnaryIntrinsics; + defm clz8 : RVPUnaryIntrinsics; + defm clz16 : RVPUnaryIntrinsics; + defm clz32 : RVPUnaryIntrinsics; + defm kabs8 : RVPUnaryIntrinsics; + defm kabs16 : RVPUnaryIntrinsics; + defm kabsw : RVPUnaryIntrinsics; + defm swap8 : RVPUnaryIntrinsics; + defm swap16 : RVPUnaryIntrinsics; + defm sunpkd810 : RVPUnaryIntrinsics; + defm sunpkd820 : RVPUnaryIntrinsics; + defm sunpkd830 : RVPUnaryIntrinsics; + defm sunpkd831 : RVPUnaryIntrinsics; + defm sunpkd832 : RVPUnaryIntrinsics; + defm zunpkd810 : RVPUnaryIntrinsics; + defm zunpkd820 : RVPUnaryIntrinsics; + defm zunpkd830 : RVPUnaryIntrinsics; + defm zunpkd831 : RVPUnaryIntrinsics; + defm zunpkd832 : RVPUnaryIntrinsics; + + class RVPBinaryIntrinsics + : Intrinsic<[llvm_any_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem]>; + + multiclass RVPBinaryIntrinsics { + def "int_riscv_" # NAME : RVPBinaryIntrinsics; + } + + 
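// Note (explanatory comment, not in the original patch): llvm_any_ty together +// with LLVMMatchType<0> makes each of these intrinsics overloaded on a single +// integer type, so one definition serves both targets; e.g. int_riscv_add8 +// instantiates as @llvm.riscv.add8.i32 on riscv32 and @llvm.riscv.add8.i64 on +// riscv64, which is exactly the naming the clang and llc tests in this patch +// check for. +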
defm add8 : RVPBinaryIntrinsics; + defm add16 : RVPBinaryIntrinsics; + defm ave : RVPBinaryIntrinsics; + defm bitrev : RVPBinaryIntrinsics; + defm cmpeq8 : RVPBinaryIntrinsics; + defm cmpeq16 : RVPBinaryIntrinsics; + defm cras16 : RVPBinaryIntrinsics; + defm crsa16 : RVPBinaryIntrinsics; + defm kadd8 : RVPBinaryIntrinsics; + defm kadd16 : RVPBinaryIntrinsics; + defm kaddh : RVPBinaryIntrinsics; + defm kaddw : RVPBinaryIntrinsics; + defm kcras16 : RVPBinaryIntrinsics; + defm kcrsa16 : RVPBinaryIntrinsics; + defm khm8 : RVPBinaryIntrinsics; + defm khmx8 : RVPBinaryIntrinsics; + defm khm16 : RVPBinaryIntrinsics; + defm khmx16 : RVPBinaryIntrinsics; + defm ksllw : RVPBinaryIntrinsics; + defm kslraw : RVPBinaryIntrinsics; + defm kslraw_u : RVPBinaryIntrinsics; + defm kstas16 : RVPBinaryIntrinsics; + defm kstsa16 : RVPBinaryIntrinsics; + defm ksub8 : RVPBinaryIntrinsics; + defm ksub16 : RVPBinaryIntrinsics; + defm ksubh : RVPBinaryIntrinsics; + defm ksubw : RVPBinaryIntrinsics; + defm kwmmul : RVPBinaryIntrinsics; + defm kwmmul_u : RVPBinaryIntrinsics; + defm maxw : RVPBinaryIntrinsics; + defm minw : RVPBinaryIntrinsics; + defm pkbb16 : RVPBinaryIntrinsics; + defm pkbt16 : RVPBinaryIntrinsics; + defm pktt16 : RVPBinaryIntrinsics; + defm pktb16 : RVPBinaryIntrinsics; + defm radd8 : RVPBinaryIntrinsics; + defm radd16 : RVPBinaryIntrinsics; + defm raddw : RVPBinaryIntrinsics; + defm rcras16 : RVPBinaryIntrinsics; + defm rcrsa16 : RVPBinaryIntrinsics; + defm rstas16 : RVPBinaryIntrinsics; + defm rstsa16 : RVPBinaryIntrinsics; + defm rsub8 : RVPBinaryIntrinsics; + defm rsub16 : RVPBinaryIntrinsics; + defm rsubw : RVPBinaryIntrinsics; + defm scmple8 : RVPBinaryIntrinsics; + defm scmple16 : RVPBinaryIntrinsics; + defm scmplt8 : RVPBinaryIntrinsics; + defm scmplt16 : RVPBinaryIntrinsics; + defm smax8 : RVPBinaryIntrinsics; + defm smax16 : RVPBinaryIntrinsics; + defm smin8 : RVPBinaryIntrinsics; + defm smin16 : RVPBinaryIntrinsics; + defm smmul : RVPBinaryIntrinsics; + defm smmul_u : RVPBinaryIntrinsics; + defm stas16 : RVPBinaryIntrinsics; + defm stsa16 : RVPBinaryIntrinsics; + defm sub8 : RVPBinaryIntrinsics; + defm sub16 : RVPBinaryIntrinsics; + defm ucmple8 : RVPBinaryIntrinsics; + defm ucmple16 : RVPBinaryIntrinsics; + defm ucmplt8 : RVPBinaryIntrinsics; + defm ucmplt16 : RVPBinaryIntrinsics; + defm ukadd8 : RVPBinaryIntrinsics; + defm ukadd16 : RVPBinaryIntrinsics; + defm ukaddh : RVPBinaryIntrinsics; + defm ukaddw : RVPBinaryIntrinsics; + defm ukcras16 : RVPBinaryIntrinsics; + defm ukcrsa16 : RVPBinaryIntrinsics; + defm ukstas16 : RVPBinaryIntrinsics; + defm ukstsa16 : RVPBinaryIntrinsics; + defm uksub8 : RVPBinaryIntrinsics; + defm uksub16 : RVPBinaryIntrinsics; + defm uksubh : RVPBinaryIntrinsics; + defm uksubw : RVPBinaryIntrinsics; + defm umax8 : RVPBinaryIntrinsics; + defm umax16 : RVPBinaryIntrinsics; + defm umin8 : RVPBinaryIntrinsics; + defm umin16 : RVPBinaryIntrinsics; + defm uradd8 : RVPBinaryIntrinsics; + defm uradd16 : RVPBinaryIntrinsics; + defm uraddw : RVPBinaryIntrinsics; + defm urcras16 : RVPBinaryIntrinsics; + defm urcrsa16 : RVPBinaryIntrinsics; + defm urstas16 : RVPBinaryIntrinsics; + defm urstsa16 : RVPBinaryIntrinsics; + defm ursub8 : RVPBinaryIntrinsics; + defm ursub16 : RVPBinaryIntrinsics; + defm ursubw : RVPBinaryIntrinsics; + + class RVPBinaryABBIntrinsics + : Intrinsic<[llvm_any_ty], + [llvm_any_ty, LLVMMatchType<1>], + [IntrNoMem]>; + + multiclass RVPBinaryABBIntrinsics { + def "int_riscv_" # NAME : RVPBinaryABBIntrinsics; + } + + defm kdmbb : RVPBinaryABBIntrinsics; 
+ defm kdmbt : RVPBinaryABBIntrinsics; + defm kdmtt : RVPBinaryABBIntrinsics; + defm khmbb : RVPBinaryABBIntrinsics; + defm khmbt : RVPBinaryABBIntrinsics; + defm khmtt : RVPBinaryABBIntrinsics; + defm kmda : RVPBinaryABBIntrinsics; + defm kmxda : RVPBinaryABBIntrinsics; + defm pbsad : RVPBinaryABBIntrinsics; + defm smbb16 : RVPBinaryABBIntrinsics; + defm smbt16 : RVPBinaryABBIntrinsics; + defm smtt16 : RVPBinaryABBIntrinsics; + defm smds : RVPBinaryABBIntrinsics; + defm smdrs : RVPBinaryABBIntrinsics; + defm smxds : RVPBinaryABBIntrinsics; + + class RVPBinaryAABIntrinsics + : Intrinsic<[llvm_any_ty], + [LLVMMatchType<0>, llvm_any_ty], + [IntrNoMem]>; + + multiclass RVPBinaryAABIntrinsics { + def "int_riscv_" # NAME : RVPBinaryAABIntrinsics; + } + + defm kmmwb2 : RVPBinaryAABIntrinsics; + defm kmmwb2_u : RVPBinaryAABIntrinsics; + defm kmmwt2 : RVPBinaryAABIntrinsics; + defm kmmwt2_u : RVPBinaryAABIntrinsics; + defm ksll8 : RVPBinaryAABIntrinsics; + defm ksll16 : RVPBinaryAABIntrinsics; + defm kslra8 : RVPBinaryAABIntrinsics; + defm kslra8_u : RVPBinaryAABIntrinsics; + defm kslra16 : RVPBinaryAABIntrinsics; + defm kslra16_u : RVPBinaryAABIntrinsics; + defm sclip8 : RVPBinaryAABIntrinsics; + defm sclip16 : RVPBinaryAABIntrinsics; + defm sclip32 : RVPBinaryAABIntrinsics; + defm sll8 : RVPBinaryAABIntrinsics; + defm sll16 : RVPBinaryAABIntrinsics; + defm smmwb : RVPBinaryAABIntrinsics; + defm smmwb_u : RVPBinaryAABIntrinsics; + defm smmwt : RVPBinaryAABIntrinsics; + defm smmwt_u : RVPBinaryAABIntrinsics; + defm sra_u : RVPBinaryAABIntrinsics; + defm sra8 : RVPBinaryAABIntrinsics; + defm sra8_u : RVPBinaryAABIntrinsics; + defm sra16 : RVPBinaryAABIntrinsics; + defm sra16_u : RVPBinaryAABIntrinsics; + defm srl8 : RVPBinaryAABIntrinsics; + defm srl8_u : RVPBinaryAABIntrinsics; + defm srl16 : RVPBinaryAABIntrinsics; + defm srl16_u : RVPBinaryAABIntrinsics; + defm uclip8 : RVPBinaryAABIntrinsics; + defm uclip16 : RVPBinaryAABIntrinsics; + defm uclip32 : RVPBinaryAABIntrinsics; + + class RVPTernaryIntrinsics + : Intrinsic<[llvm_any_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem]>; + + multiclass RVPTernaryIntrinsics { + def "int_riscv_" # NAME : RVPTernaryIntrinsics; + } + + defm bpick : RVPTernaryIntrinsics; + defm insb : RVPTernaryIntrinsics; + defm kmmac : RVPTernaryIntrinsics; + defm kmmac_u : RVPTernaryIntrinsics; + defm kmmsb : RVPTernaryIntrinsics; + defm kmmsb_u : RVPTernaryIntrinsics; + + class RVPTernaryAABBIntrinsics + : Intrinsic<[llvm_any_ty], + [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<1>], + [IntrNoMem]>; + + multiclass RVPTernaryAABBIntrinsics { + def "int_riscv_" # NAME : RVPTernaryAABBIntrinsics; + } + + defm kdmabb : RVPTernaryAABBIntrinsics; + defm kdmabt : RVPTernaryAABBIntrinsics; + defm kdmatt : RVPTernaryAABBIntrinsics; + defm kmabb : RVPTernaryAABBIntrinsics; + defm kmabt : RVPTernaryAABBIntrinsics; + defm kmatt : RVPTernaryAABBIntrinsics; + defm kmada : RVPTernaryAABBIntrinsics; + defm kmaxda : RVPTernaryAABBIntrinsics; + defm kmads : RVPTernaryAABBIntrinsics; + defm kmadrs : RVPTernaryAABBIntrinsics; + defm kmaxds : RVPTernaryAABBIntrinsics; + defm kmsda : RVPTernaryAABBIntrinsics; + defm kmsxda : RVPTernaryAABBIntrinsics; + defm pbsada : RVPTernaryAABBIntrinsics; + defm smaqa : RVPTernaryAABBIntrinsics; + defm smaqa_su : RVPTernaryAABBIntrinsics; + defm umaqa : RVPTernaryAABBIntrinsics; + + class RVPTernaryAAABIntrinsics + : Intrinsic<[llvm_any_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_any_ty], + [IntrNoMem]>; + + multiclass 
RVPTernaryAAABIntrinsics { + def "int_riscv_" # NAME : RVPTernaryAAABIntrinsics; + } + + defm kmmawb : RVPTernaryAAABIntrinsics; + defm kmmawb_u : RVPTernaryAAABIntrinsics; + defm kmmawb2 : RVPTernaryAAABIntrinsics; + defm kmmawb2_u : RVPTernaryAAABIntrinsics; + defm kmmawt : RVPTernaryAAABIntrinsics; + defm kmmawt_u : RVPTernaryAAABIntrinsics; + defm kmmawt2 : RVPTernaryAAABIntrinsics; + defm kmmawt2_u : RVPTernaryAAABIntrinsics; +} // TargetPrefix = "riscv" diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -172,6 +172,17 @@ } } + if (Subtarget.hasStdExtZpn()) { + if (Subtarget.is64Bit()) { + addRegisterClass(MVT::v8i8, &RISCV::GPRRegClass); + addRegisterClass(MVT::v4i16, &RISCV::GPRRegClass); + addRegisterClass(MVT::v2i32, &RISCV::GPRRegClass); + } else { + addRegisterClass(MVT::v4i8, &RISCV::GPRRegClass); + addRegisterClass(MVT::v2i16, &RISCV::GPRRegClass); + } + } + // Compute derived properties from the register classes. computeRegisterProperties(STI.getRegisterInfo()); @@ -932,6 +943,41 @@ } } + if (Subtarget.hasStdExtZpn()) { + const auto addTypeForP = [&](MVT VT, MVT PromotedBitwiseVT) { + // Expand all builtin opcodes. + for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) + setOperationAction(Opc, VT, Expand); + + setOperationAction(ISD::BITCAST, VT, Legal); + + // Promote load and store operations. + setOperationAction(ISD::LOAD, VT, Promote); + AddPromotedToType(ISD::LOAD, VT, PromotedBitwiseVT); + setOperationAction(ISD::STORE, VT, Promote); + AddPromotedToType(ISD::STORE, VT, PromotedBitwiseVT); + }; + + if (Subtarget.is64Bit()) { + addTypeForP(MVT::v8i8, MVT::i64); + addTypeForP(MVT::v4i16, MVT::i64); + addTypeForP(MVT::v2i32, MVT::i64); + } else { + addTypeForP(MVT::v4i8, MVT::i32); + addTypeForP(MVT::v2i16, MVT::i32); + } + + // Expand all truncating stores and extending loads. + for (MVT VT0 : MVT::vector_valuetypes()) { + for (MVT VT1 : MVT::vector_valuetypes()) { + setTruncStoreAction(VT0, VT1, Expand); + setLoadExtAction(ISD::SEXTLOAD, VT0, VT1, Expand); + setLoadExtAction(ISD::ZEXTLOAD, VT0, VT1, Expand); + setLoadExtAction(ISD::EXTLOAD, VT0, VT1, Expand); + } + } + } + // Function alignments. const Align FunctionAlignment(Subtarget.hasStdExtC() ? 
2 : 4); setMinFunctionAlignment(FunctionAlignment); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoP.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoP.td @@ -916,3 +916,276 @@ def : InstAlias<"kmada32 $rd, $rs1, $rs2", (KMAR64_64 GPR:$rd, GPR:$rs1, GPR:$rs2)>; def : InstAlias<"smbb32 $rd, $rs1, $rs2", (MULSR64_64 GPR:$rd, GPR:$rs1, GPR:$rs2)>; } // Predicates = [HasStdExtZpsfoperand, IsRV64] + +//===----------------------------------------------------------------------===// +// Intrinsics codegen patterns +//===----------------------------------------------------------------------===// + +class RVPBitconvertPat + : Pat<(DstVT (bitconvert (SrcVT SrcRC:$src))), + (COPY_TO_REGCLASS SrcRC:$src, DstRC)>; + +let Predicates = [HasStdExtZpn] in { +def : RVPBitconvertPat; +def : RVPBitconvertPat; +def : RVPBitconvertPat; +def : RVPBitconvertPat; +def : RVPBitconvertPat; +def : RVPBitconvertPat; + +def : RVPBitconvertPat; +def : RVPBitconvertPat; +def : RVPBitconvertPat; +def : RVPBitconvertPat; +def : RVPBitconvertPat; +def : RVPBitconvertPat; +} // Predicates = [HasStdExtZpn] + +// Unary operation +class RVPUnaryIntPat + : Pat<(XLenVT (!cast("int_riscv_" # IntID) XLenVT:$rs1)), + (Inst GPR:$rs1)>; + +let Predicates = [HasStdExtZpn] in { +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : Pat<(XLenVT (!cast("int_riscv_swap16") XLenVT:$rs1)), + (PKBT16 GPR:$rs1, GPR:$rs1)>; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +def : RVPUnaryIntPat; +} // Predicates = [HasStdExtZpn] + +// Binary operation +class RVPBinaryIntPat + : Pat<(XLenVT (!cast("int_riscv_" # IntID) + XLenVT:$rs1, XLenVT:$rs2)), + (Inst GPR:$rs1, GPR:$rs2)>; + +let Predicates = [HasStdExtZpn] in { +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : 
RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +def : RVPBinaryIntPat; +} // Predicates = [HasStdExtZpn] + +class RVPBinaryI3IntPat + : Pat<(XLenVT (!cast("int_riscv_" # IntID) + XLenVT:$rs1, uimm3:$rs2)), + (Inst GPR:$rs1, uimm3:$rs2)>; + +let Predicates = [HasStdExtZpn] in { +def : RVPBinaryI3IntPat; +def : RVPBinaryI3IntPat; +def : RVPBinaryI3IntPat; +} // Predicates = [HasStdExtZpn] + +class RVPBinaryI4IntPat + : Pat<(XLenVT (!cast("int_riscv_" # IntID) + XLenVT:$rs1, uimm4:$rs2)), + (Inst GPR:$rs1, uimm4:$rs2)>; + +let Predicates = [HasStdExtZpn] in { +def : RVPBinaryI4IntPat; +def : RVPBinaryI4IntPat; +def : RVPBinaryI4IntPat; +} // Predicates = [HasStdExtZpn] + +class RVPBinaryI5IntPat + : Pat<(XLenVT (!cast("int_riscv_" # IntID) + XLenVT:$rs1, uimm5:$rs2)), + (Inst GPR:$rs1, uimm5:$rs2)>; + +let Predicates = [HasStdExtZpn] in { +def : RVPBinaryI5IntPat; +def : RVPBinaryI5IntPat; +def : RVPBinaryI5IntPat; +} // Predicates = [HasStdExtZpn] + +// Ternary operation +class RVPTernaryIntPat + : Pat<(XLenVT (!cast("int_riscv_" # IntID) + XLenVT:$rs1, XLenVT:$rs2, XLenVT:$rs3)), + (Inst GPR:$rs1, GPR:$rs2, GPR:$rs3)>; + +let Predicates = [HasStdExtZpn] in { +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +def : RVPTernaryIntPat; +} // Predicates = 
[HasStdExtZpn] + +class RVPTernaryINSBIntPat<RVInst Inst, string IntID> + : Pat<(XLenVT (!cast<Intrinsic>("int_riscv_" # IntID) + XLenVT:$rs1, XLenVT:$rs2, uimmlog2xlenbytes:$rs3)), + (Inst GPR:$rs1, GPR:$rs2, uimmlog2xlenbytes:$rs3)>; + +let Predicates = [HasStdExtZpn] in +def : RVPTernaryINSBIntPat<INSB, "insb">; diff --git a/llvm/test/CodeGen/RISCV/rv32zpn-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv32zpn-intrinsic.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rv32zpn-intrinsic.ll @@ -0,0 +1,2275 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-p -verify-machineinstrs < %s \ +; RUN: | FileCheck %s + +define i32 @add8(i32 %a, i32 %b) { +; CHECK-LABEL: add8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: add8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.add8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.add8.i32(i32, i32) + +define i32 @add16(i32 %a, i32 %b) { +; CHECK-LABEL: add16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: add16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.add16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.add16.i32(i32, i32) + +define i32 @ave(i32 %a, i32 %b) { +; CHECK-LABEL: ave: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ave a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ave.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ave.i32(i32, i32) + +define i32 @bitrev(i32 %a, i32 %b) { +; CHECK-LABEL: bitrev: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: bitrev a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.bitrev.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.bitrev.i32(i32, i32) + +define i32 @bpick(i32 %a, i32 %b, i32 %c) { +; CHECK-LABEL: bpick: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: bpick a0, a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.bpick.i32(i32 %a, i32 %b, i32 %c) + ret i32 %0 +} + +declare i32 @llvm.riscv.bpick.i32(i32, i32, i32) + +define i32 @clrs8(i32 %a) { +; CHECK-LABEL: clrs8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clrs8 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.clrs8.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.clrs8.i32(i32) + +define i32 @clrs16(i32 %a) { +; CHECK-LABEL: clrs16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clrs16 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.clrs16.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.clrs16.i32(i32) + +define i32 @clrs32(i32 %a) { +; CHECK-LABEL: clrs32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clrs32 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.clrs32.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.clrs32.i32(i32) + +define i32 @clz8(i32 %a) { +; CHECK-LABEL: clz8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clz8 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.clz8.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.clz8.i32(i32) + +define i32 @clz16(i32 %a) { +; CHECK-LABEL: clz16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clz16 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.clz16.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.clz16.i32(i32) + +define i32 @clz32(i32 %a) { +; CHECK-LABEL: clz32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clz32 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.clz32.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.clz32.i32(i32) + +define i32 @cmpeq8(i32 %a, i32 %b) { +; 
CHECK-LABEL: cmpeq8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: cmpeq8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.cmpeq8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.cmpeq8.i32(i32, i32) + +define i32 @cmpeq16(i32 %a, i32 %b) { +; CHECK-LABEL: cmpeq16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: cmpeq16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.cmpeq16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.cmpeq16.i32(i32, i32) + +define i32 @cras16(i32 %a, i32 %b) { +; CHECK-LABEL: cras16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: cras16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.cras16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.cras16.i32(i32, i32) + +define i32 @crsa16(i32 %a, i32 %b) { +; CHECK-LABEL: crsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: crsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.crsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.crsa16.i32(i32, i32) + +define i32 @insb(i32 %a, i32 %b) { +; CHECK-LABEL: insb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: insb a0, a1, 3 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.insb.i32(i32 %a, i32 %b, i32 3) + ret i32 %0 +} + +declare i32 @llvm.riscv.insb.i32(i32, i32, i32) + +define i32 @kabs8(i32 %a) { +; CHECK-LABEL: kabs8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kabs8 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kabs8.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.kabs8.i32(i32) + +define i32 @kabs16(i32 %a) { +; CHECK-LABEL: kabs16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kabs16 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kabs16.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.kabs16.i32(i32) + +define i32 @kabsw(i32 %a) { +; CHECK-LABEL: kabsw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kabsw a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kabsw.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.kabsw.i32(i32) + +define i32 @kadd8(i32 %a, i32 %b) { +; CHECK-LABEL: kadd8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kadd8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kadd8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kadd8.i32(i32, i32) + +define i32 @kadd16(i32 %a, i32 %b) { +; CHECK-LABEL: kadd16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kadd16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kadd16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kadd16.i32(i32, i32) + +define i32 @kaddh(i32 %a, i32 %b) { +; CHECK-LABEL: kaddh: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kaddh a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kaddh.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kaddh.i32(i32, i32) + +define i32 @kaddw(i32 %a, i32 %b) { +; CHECK-LABEL: kaddw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kaddw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kaddw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kaddw.i32(i32, i32) + +define i32 @kcras16(i32 %a, i32 %b) { +; CHECK-LABEL: kcras16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kcras16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kcras16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kcras16.i32(i32, i32) + +define i32 @kcrsa16(i32 %a, i32 %b) { +; CHECK-LABEL: kcrsa16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: kcrsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kcrsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kcrsa16.i32(i32, i32) + +define i32 @kdmbb(i32 %a, i32 %b) { +; CHECK-LABEL: kdmbb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kdmbb a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kdmbb.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kdmbb.i32.i32(i32, i32) + +define i32 @kdmbt(i32 %a, i32 %b) { +; CHECK-LABEL: kdmbt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kdmbt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kdmbt.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kdmbt.i32.i32(i32, i32) + +define i32 @kdmtt(i32 %a, i32 %b) { +; CHECK-LABEL: kdmtt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kdmtt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kdmtt.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kdmtt.i32.i32(i32, i32) + +define i32 @kdmabb(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kdmabb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kdmabb a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kdmabb.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kdmabb.i32.i32(i32, i32, i32) + +define i32 @kdmabt(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kdmabt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kdmabt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kdmabt.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kdmabt.i32.i32(i32, i32, i32) + +define i32 @kdmatt(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kdmatt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kdmatt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kdmatt.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kdmatt.i32.i32(i32, i32, i32) + +define i32 @khm8(i32 %a, i32 %b) { +; CHECK-LABEL: khm8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khm8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.khm8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.khm8.i32(i32, i32) + +define i32 @khmx8(i32 %a, i32 %b) { +; CHECK-LABEL: khmx8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khmx8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.khmx8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.khmx8.i32(i32, i32) + +define i32 @khm16(i32 %a, i32 %b) { +; CHECK-LABEL: khm16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khm16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.khm16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.khm16.i32(i32, i32) + +define i32 @khmx16(i32 %a, i32 %b) { +; CHECK-LABEL: khmx16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khmx16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.khmx16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.khmx16.i32(i32, i32) + +define i32 @khmbb(i32 %a, i32 %b) { +; CHECK-LABEL: khmbb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khmbb a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.khmbb.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.khmbb.i32.i32(i32, i32) + +define i32 @khmbt(i32 %a, i32 %b) { +; CHECK-LABEL: khmbt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khmbt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.khmbt.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 
@llvm.riscv.khmbt.i32.i32(i32, i32) + +define i32 @khmtt(i32 %a, i32 %b) { +; CHECK-LABEL: khmtt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khmtt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.khmtt.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.khmtt.i32.i32(i32, i32) + +define i32 @kmabb(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmabb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmabb a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmabb.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmabb.i32.i32(i32, i32, i32) + +define i32 @kmabt(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmabt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmabt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmabt.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmabt.i32.i32(i32, i32, i32) + +define i32 @kmatt(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmatt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmatt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmatt.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmatt.i32.i32(i32, i32, i32) + +define i32 @kmada(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmada: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmada a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmada.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmada.i32.i32(i32, i32, i32) + +define i32 @kmaxda(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmaxda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmaxda a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmaxda.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmaxda.i32.i32(i32, i32, i32) + +define i32 @kmads(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmads: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmads a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmads.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmads.i32.i32(i32, i32, i32) + +define i32 @kmadrs(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmadrs: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmadrs a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmadrs.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmadrs.i32.i32(i32, i32, i32) + +define i32 @kmaxds(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmaxds: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmaxds a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmaxds.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmaxds.i32.i32(i32, i32, i32) + +define i32 @kmda(i32 %a, i32 %b) { +; CHECK-LABEL: kmda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmda a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmda.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmda.i32.i32(i32, i32) + +define i32 @kmxda(i32 %a, i32 %b) { +; CHECK-LABEL: kmxda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmxda a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmxda.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmxda.i32.i32(i32, i32) + +define i32 @kmmac(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmac: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmac a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmac.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 
@llvm.riscv.kmmac.i32(i32, i32, i32) + +define i32 @kmmac_u(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmac_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmac.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmac.u.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmac.u.i32(i32, i32, i32) + +define i32 @kmmawb(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmawb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawb a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmawb.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmawb.i32.i32(i32, i32, i32) + +define i32 @kmmawb_u(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmawb_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawb.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmawb.u.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmawb.u.i32.i32(i32, i32, i32) + +define i32 @kmmawb2(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmawb2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawb2 a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmawb2.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmawb2.i32.i32(i32, i32, i32) + +define i32 @kmmawb2_u(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmawb2_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawb2.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmawb2.u.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmawb2.u.i32.i32(i32, i32, i32) + +define i32 @kmmawt(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmawt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmawt.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmawt.i32.i32(i32, i32, i32) + +define i32 @kmmawt_u(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmawt_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawt.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmawt.u.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmawt.u.i32.i32(i32, i32, i32) + +define i32 @kmmawt2(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmawt2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawt2 a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmawt2.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmawt2.i32.i32(i32, i32, i32) + +define i32 @kmmawt2_u(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmawt2_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawt2.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmawt2.u.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmawt2.u.i32.i32(i32, i32, i32) + +define i32 @kmmsb(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmsb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmsb a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmsb.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmsb.i32(i32, i32, i32) + +define i32 @kmmsb_u(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmmsb_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmsb.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmsb.u.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmsb.u.i32(i32, i32, i32) + +define i32 @kmmwb2(i32 %a, i32 %b) { +; CHECK-LABEL: kmmwb2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmwb2 a0, a0, 
a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmwb2.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmwb2.i32.i32(i32, i32) + +define i32 @kmmwb2_u(i32 %a, i32 %b) { +; CHECK-LABEL: kmmwb2_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmwb2.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmwb2.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmwb2.u.i32.i32(i32, i32) + +define i32 @kmmwt2(i32 %a, i32 %b) { +; CHECK-LABEL: kmmwt2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmwt2 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmwt2.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmwt2.i32.i32(i32, i32) + +define i32 @kmmwt2_u(i32 %a, i32 %b) { +; CHECK-LABEL: kmmwt2_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmwt2.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmmwt2.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmmwt2.u.i32.i32(i32, i32) + +define i32 @kmsda(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmsda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmsda a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmsda.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmsda.i32.i32(i32, i32, i32) + +define i32 @kmsxda(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: kmsxda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmsxda a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kmsxda.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kmsxda.i32.i32(i32, i32, i32) + +define i32 @ksllw(i32 %a, i32 %b) { +; CHECK-LABEL: ksllw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksllw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksllw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +define i32 @kslliw(i32 %a, i32 %b) { +; CHECK-LABEL: kslliw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslliw a0, a0, 20 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksllw.i32(i32 %a, i32 20) + ret i32 %0 +} + +declare i32 @llvm.riscv.ksllw.i32(i32, i32) + +define i32 @ksll8(i32 %a, i32 %b) { +; CHECK-LABEL: ksll8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksll8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksll8.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +define i32 @kslli8(i32 %a, i32 %b) { +; CHECK-LABEL: kslli8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslli8 a0, a0, 3 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksll8.i32.i32(i32 %a, i32 3) + ret i32 %0 +} + +declare i32 @llvm.riscv.ksll8.i32.i32(i32, i32) + +define i32 @ksll16(i32 %a, i32 %b) { +; CHECK-LABEL: ksll16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksll16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksll16.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +define i32 @kslli16(i32 %a, i32 %b) { +; CHECK-LABEL: kslli16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslli16 a0, a0, 11 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksll16.i32.i32(i32 %a, i32 11) + ret i32 %0 +} + +declare i32 @llvm.riscv.ksll16.i32.i32(i32, i32) + +define i32 @kslra8(i32 %a, i32 %b) { +; CHECK-LABEL: kslra8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslra8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kslra8.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kslra8.i32.i32(i32, i32) + +define i32 @kslra8_u(i32 %a, i32 %b) { +; CHECK-LABEL: kslra8_u: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: kslra8.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kslra8.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kslra8.u.i32.i32(i32, i32) + +define i32 @kslra16(i32 %a, i32 %b) { +; CHECK-LABEL: kslra16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslra16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kslra16.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kslra16.i32.i32(i32, i32) + +define i32 @kslra16_u(i32 %a, i32 %b) { +; CHECK-LABEL: kslra16_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslra16.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kslra16.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kslra16.u.i32.i32(i32, i32) + +define i32 @kslraw(i32 %a, i32 %b) { +; CHECK-LABEL: kslraw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslraw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kslraw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kslraw.i32(i32, i32) + +define i32 @kslraw_u(i32 %a, i32 %b) { +; CHECK-LABEL: kslraw_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslraw.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kslraw.u.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kslraw.u.i32(i32, i32) + +define i32 @kstas16(i32 %a, i32 %b) { +; CHECK-LABEL: kstas16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kstas16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kstas16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kstas16.i32(i32, i32) + +define i32 @kstsa16(i32 %a, i32 %b) { +; CHECK-LABEL: kstsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kstsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kstsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kstsa16.i32(i32, i32) + +define i32 @ksub8(i32 %a, i32 %b) { +; CHECK-LABEL: ksub8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksub8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksub8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ksub8.i32(i32, i32) + +define i32 @ksub16(i32 %a, i32 %b) { +; CHECK-LABEL: ksub16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksub16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksub16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ksub16.i32(i32, i32) + +define i32 @ksubh(i32 %a, i32 %b) { +; CHECK-LABEL: ksubh: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksubh a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksubh.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ksubh.i32(i32, i32) + +define i32 @ksubw(i32 %a, i32 %b) { +; CHECK-LABEL: ksubw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksubw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ksubw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ksubw.i32(i32, i32) + +define i32 @kwmmul(i32 %a, i32 %b) { +; CHECK-LABEL: kwmmul: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kwmmul a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kwmmul.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kwmmul.i32(i32, i32) + +define i32 @kwmmul_u(i32 %a, i32 %b) { +; CHECK-LABEL: kwmmul_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kwmmul.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.kwmmul.u.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.kwmmul.u.i32(i32, i32) + +define i32 @maxw(i32 %a, i32 %b) { +; CHECK-LABEL: 
maxw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: maxw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.maxw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.maxw.i32(i32, i32) + +define i32 @minw(i32 %a, i32 %b) { +; CHECK-LABEL: minw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: minw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.minw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.minw.i32(i32, i32) + +define i32 @pbsad(i32 %a, i32 %b) { +; CHECK-LABEL: pbsad: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pbsad a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.pbsad.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.pbsad.i32.i32(i32, i32) + +define i32 @pbsada(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: pbsada: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pbsada a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.pbsada.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.pbsada.i32.i32(i32, i32, i32) + +define i32 @pkbb16(i32 %a, i32 %b) { +; CHECK-LABEL: pkbb16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pkbb16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.pkbb16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.pkbb16.i32(i32, i32) + +define i32 @pkbt16(i32 %a, i32 %b) { +; CHECK-LABEL: pkbt16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pkbt16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.pkbt16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.pkbt16.i32(i32, i32) + +define i32 @pktt16(i32 %a, i32 %b) { +; CHECK-LABEL: pktt16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pktt16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.pktt16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.pktt16.i32(i32, i32) + +define i32 @pktb16(i32 %a, i32 %b) { +; CHECK-LABEL: pktb16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pktb16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.pktb16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.pktb16.i32(i32, i32) + +define i32 @radd8(i32 %a, i32 %b) { +; CHECK-LABEL: radd8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: radd8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.radd8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.radd8.i32(i32, i32) + +define i32 @radd16(i32 %a, i32 %b) { +; CHECK-LABEL: radd16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: radd16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.radd16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.radd16.i32(i32, i32) + +define i32 @raddw(i32 %a, i32 %b) { +; CHECK-LABEL: raddw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: raddw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.raddw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.raddw.i32(i32, i32) + +define i32 @rcras16(i32 %a, i32 %b) { +; CHECK-LABEL: rcras16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: rcras16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.rcras16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.rcras16.i32(i32, i32) + +define i32 @rcrsa16(i32 %a, i32 %b) { +; CHECK-LABEL: rcrsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: rcrsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.rcrsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.rcrsa16.i32(i32, i32) + 
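+; A reference model for the r-prefixed halving ops in this file (an
+; assumption drawn from the draft P spec, not a pattern this patch
+; matches): each lane averages its operands before truncation, e.g. one
+; signed 16-bit lane of radd16 in plain IR:
+;   %s    = sext i16 %x to i32
+;   %t    = sext i16 %y to i32
+;   %sum  = add nsw i32 %s, %t
+;   %half = ashr i32 %sum, 1
+;   %lane = trunc i32 %half to i16
+; The unsigned ur* forms below presumably use zext/lshr instead.
+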
+define i32 @rstas16(i32 %a, i32 %b) { +; CHECK-LABEL: rstas16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: rstas16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.rstas16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.rstas16.i32(i32, i32) + +define i32 @rstsa16(i32 %a, i32 %b) { +; CHECK-LABEL: rstsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: rstsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.rstsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.rstsa16.i32(i32, i32) + +define i32 @rsub8(i32 %a, i32 %b) { +; CHECK-LABEL: rsub8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: rsub8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.rsub8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.rsub8.i32(i32, i32) + +define i32 @rsub16(i32 %a, i32 %b) { +; CHECK-LABEL: rsub16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: rsub16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.rsub16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.rsub16.i32(i32, i32) + +define i32 @rsubw(i32 %a, i32 %b) { +; CHECK-LABEL: rsubw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: rsubw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.rsubw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.rsubw.i32(i32, i32) + +define i32 @sclip8(i32 %a) { +; CHECK-LABEL: sclip8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sclip8 a0, a0, 5 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sclip8.i32.i32(i32 %a, i32 5) + ret i32 %0 +} + +declare i32 @llvm.riscv.sclip8.i32.i32(i32, i32) + +define i32 @sclip16(i32 %a) { +; CHECK-LABEL: sclip16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sclip16 a0, a0, 6 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sclip16.i32.i32(i32 %a, i32 6) + ret i32 %0 +} + +declare i32 @llvm.riscv.sclip16.i32.i32(i32, i32) + +define i32 @sclip32(i32 %a) { +; CHECK-LABEL: sclip32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sclip32 a0, a0, 7 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sclip32.i32.i32(i32 %a, i32 7) + ret i32 %0 +} + +declare i32 @llvm.riscv.sclip32.i32.i32(i32, i32) + +define i32 @scmple8(i32 %a, i32 %b) { +; CHECK-LABEL: scmple8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: scmple8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.scmple8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.scmple8.i32(i32, i32) + +define i32 @scmple16(i32 %a, i32 %b) { +; CHECK-LABEL: scmple16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: scmple16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.scmple16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.scmple16.i32(i32, i32) + +define i32 @scmplt8(i32 %a, i32 %b) { +; CHECK-LABEL: scmplt8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: scmplt8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.scmplt8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.scmplt8.i32(i32, i32) + +define i32 @scmplt16(i32 %a, i32 %b) { +; CHECK-LABEL: scmplt16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: scmplt16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.scmplt16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.scmplt16.i32(i32, i32) + +define i32 @sll8(i32 %a, i32 %b) { +; CHECK-LABEL: sll8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sll8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sll8.i32.i32(i32 %a, i32 %b) + 
ret i32 %0 +} + +declare i32 @llvm.riscv.sll8.i32.i32(i32, i32) + +define i32 @sll16(i32 %a, i32 %b) { +; CHECK-LABEL: sll16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sll16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sll16.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.sll16.i32.i32(i32, i32) + +define i32 @smaqa(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: smaqa: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smaqa a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smaqa.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smaqa.i32.i32(i32, i32, i32) + +define i32 @smaqa_su(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: smaqa_su: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smaqa.su a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smaqa.su.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smaqa.su.i32.i32(i32, i32, i32) + +define i32 @smax8(i32 %a, i32 %b) { +; CHECK-LABEL: smax8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smax8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smax8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smax8.i32(i32, i32) + +define i32 @smax16(i32 %a, i32 %b) { +; CHECK-LABEL: smax16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smax16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smax16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smax16.i32(i32, i32) + +define i32 @smbb16(i32 %a, i32 %b) { +; CHECK-LABEL: smbb16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smbb16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smbb16.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smbb16.i32.i32(i32, i32) + +define i32 @smbt16(i32 %a, i32 %b) { +; CHECK-LABEL: smbt16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smbt16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smbt16.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smbt16.i32.i32(i32, i32) + +define i32 @smtt16(i32 %a, i32 %b) { +; CHECK-LABEL: smtt16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smtt16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smtt16.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smtt16.i32.i32(i32, i32) + +define i32 @smds(i32 %a, i32 %b) { +; CHECK-LABEL: smds: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smds a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smds.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smds.i32.i32(i32, i32) + +define i32 @smdrs(i32 %a, i32 %b) { +; CHECK-LABEL: smdrs: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smdrs a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smdrs.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smdrs.i32.i32(i32, i32) + +define i32 @smxds(i32 %a, i32 %b) { +; CHECK-LABEL: smxds: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smxds a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smxds.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smxds.i32.i32(i32, i32) + +define i32 @smin8(i32 %a, i32 %b) { +; CHECK-LABEL: smin8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smin8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smin8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smin8.i32(i32, i32) + +define i32 @smin16(i32 %a, i32 %b) { +; CHECK-LABEL: smin16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: smin16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smin16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smin16.i32(i32, i32) + +define i32 @smmul(i32 %a, i32 %b) { +; CHECK-LABEL: smmul: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smmul a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smmul.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smmul.i32(i32, i32) + +define i32 @smmul_u(i32 %a, i32 %b) { +; CHECK-LABEL: smmul_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smmul.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smmul.u.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smmul.u.i32(i32, i32) + +define i32 @smmwb(i32 %a, i32 %b) { +; CHECK-LABEL: smmwb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smmwb a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smmwb.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smmwb.i32.i32(i32, i32) + +define i32 @smmwb_u(i32 %a, i32 %b) { +; CHECK-LABEL: smmwb_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smmwb.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smmwb.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smmwb.u.i32.i32(i32, i32) + +define i32 @smmwt(i32 %a, i32 %b) { +; CHECK-LABEL: smmwt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smmwt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smmwt.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smmwt.i32.i32(i32, i32) + +define i32 @smmwt_u(i32 %a, i32 %b) { +; CHECK-LABEL: smmwt_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: smmwt.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.smmwt.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.smmwt.u.i32.i32(i32, i32) + +define i32 @sra_u(i32 %a, i32 %b) { +; CHECK-LABEL: sra_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sra.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sra.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.sra.u.i32.i32(i32, i32) + +define i32 @sra8(i32 %a, i32 %b) { +; CHECK-LABEL: sra8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sra8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sra8.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.sra8.i32.i32(i32, i32) + +define i32 @sra8_u(i32 %a, i32 %b) { +; CHECK-LABEL: sra8_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sra8.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sra8.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.sra8.u.i32.i32(i32, i32) + +define i32 @sra16(i32 %a, i32 %b) { +; CHECK-LABEL: sra16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sra16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sra16.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.sra16.i32.i32(i32, i32) + +define i32 @sra16_u(i32 %a, i32 %b) { +; CHECK-LABEL: sra16_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sra16.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sra16.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.sra16.u.i32.i32(i32, i32) + +define i32 @srl8(i32 %a, i32 %b) { +; CHECK-LABEL: srl8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: srl8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.srl8.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.srl8.i32.i32(i32, i32) + 
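+; The .u-suffixed shifts below are rounding variants: per our reading of
+; the draft spec (an assumption, not something this patch asserts), they
+; add back the last bit shifted out, roughly, per lane for sh > 0:
+;   r = (x + (1 << (sh - 1))) >> sh
+; What the tests themselves pin down is only that each .u intrinsic,
+; e.g. @llvm.riscv.srl8.u.i32.i32, selects the matching .u mnemonic
+; rather than the plain one.
+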
+define i32 @srl8_u(i32 %a, i32 %b) { +; CHECK-LABEL: srl8_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: srl8.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.srl8.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.srl8.u.i32.i32(i32, i32) + +define i32 @srl16(i32 %a, i32 %b) { +; CHECK-LABEL: srl16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: srl16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.srl16.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.srl16.i32.i32(i32, i32) + +define i32 @srl16_u(i32 %a, i32 %b) { +; CHECK-LABEL: srl16_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: srl16.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.srl16.u.i32.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.srl16.u.i32.i32(i32, i32) + +define i32 @stas16(i32 %a, i32 %b) { +; CHECK-LABEL: stas16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: stas16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.stas16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.stas16.i32(i32, i32) + +define i32 @stsa16(i32 %a, i32 %b) { +; CHECK-LABEL: stsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: stsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.stsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.stsa16.i32(i32, i32) + +define i32 @sub8(i32 %a, i32 %b) { +; CHECK-LABEL: sub8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sub8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sub8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.sub8.i32(i32, i32) + +define i32 @sub16(i32 %a, i32 %b) { +; CHECK-LABEL: sub16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sub16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sub16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.sub16.i32(i32, i32) + +define i32 @sunpkd810(i32 %a) { +; CHECK-LABEL: sunpkd810: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sunpkd810 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sunpkd810.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.sunpkd810.i32(i32) + +define i32 @sunpkd820(i32 %a) { +; CHECK-LABEL: sunpkd820: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sunpkd820 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sunpkd820.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.sunpkd820.i32(i32) + +define i32 @sunpkd830(i32 %a) { +; CHECK-LABEL: sunpkd830: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sunpkd830 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sunpkd830.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.sunpkd830.i32(i32) + +define i32 @sunpkd831(i32 %a) { +; CHECK-LABEL: sunpkd831: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sunpkd831 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sunpkd831.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.sunpkd831.i32(i32) + +define i32 @sunpkd832(i32 %a) { +; CHECK-LABEL: sunpkd832: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: sunpkd832 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.sunpkd832.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.sunpkd832.i32(i32) + +define i32 @swap8(i32 %a) { +; CHECK-LABEL: swap8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: swap8 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.swap8.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.swap8.i32(i32) + +define i32 @swap16(i32 %a) { +; 
CHECK-LABEL: swap16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: swap16 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.swap16.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.swap16.i32(i32) + +define i32 @uclip8(i32 %a) { +; CHECK-LABEL: uclip8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uclip8 a0, a0, 5 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uclip8.i32.i32(i32 %a, i32 5) + ret i32 %0 +} + +declare i32 @llvm.riscv.uclip8.i32.i32(i32, i32) + +define i32 @uclip16(i32 %a) { +; CHECK-LABEL: uclip16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uclip16 a0, a0, 6 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uclip16.i32.i32(i32 %a, i32 6) + ret i32 %0 +} + +declare i32 @llvm.riscv.uclip16.i32.i32(i32, i32) + +define i32 @uclip32(i32 %a) { +; CHECK-LABEL: uclip32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uclip32 a0, a0, 7 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uclip32.i32.i32(i32 %a, i32 7) + ret i32 %0 +} + +declare i32 @llvm.riscv.uclip32.i32.i32(i32, i32) + +define i32 @ucmple8(i32 %a, i32 %b) { +; CHECK-LABEL: ucmple8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ucmple8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ucmple8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ucmple8.i32(i32, i32) + +define i32 @ucmple16(i32 %a, i32 %b) { +; CHECK-LABEL: ucmple16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ucmple16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ucmple16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ucmple16.i32(i32, i32) + +define i32 @ucmplt8(i32 %a, i32 %b) { +; CHECK-LABEL: ucmplt8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ucmplt8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ucmplt8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ucmplt8.i32(i32, i32) + +define i32 @ucmplt16(i32 %a, i32 %b) { +; CHECK-LABEL: ucmplt16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ucmplt16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ucmplt16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ucmplt16.i32(i32, i32) + +define i32 @ukadd8(i32 %a, i32 %b) { +; CHECK-LABEL: ukadd8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ukadd8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ukadd8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ukadd8.i32(i32, i32) + +define i32 @ukadd16(i32 %a, i32 %b) { +; CHECK-LABEL: ukadd16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ukadd16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ukadd16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ukadd16.i32(i32, i32) + +define i32 @ukaddh(i32 %a, i32 %b) { +; CHECK-LABEL: ukaddh: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ukaddh a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ukaddh.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ukaddh.i32(i32, i32) + +define i32 @ukaddw(i32 %a, i32 %b) { +; CHECK-LABEL: ukaddw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ukaddw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ukaddw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ukaddw.i32(i32, i32) + +define i32 @ukcras16(i32 %a, i32 %b) { +; CHECK-LABEL: ukcras16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ukcras16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ukcras16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 
@llvm.riscv.ukcras16.i32(i32, i32) + +define i32 @ukcrsa16(i32 %a, i32 %b) { +; CHECK-LABEL: ukcrsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ukcrsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ukcrsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ukcrsa16.i32(i32, i32) + +define i32 @ukstas16(i32 %a, i32 %b) { +; CHECK-LABEL: ukstas16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ukstas16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ukstas16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ukstas16.i32(i32, i32) + +define i32 @ukstsa16(i32 %a, i32 %b) { +; CHECK-LABEL: ukstsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ukstsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ukstsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ukstsa16.i32(i32, i32) + +define i32 @uksub8(i32 %a, i32 %b) { +; CHECK-LABEL: uksub8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uksub8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uksub8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.uksub8.i32(i32, i32) + +define i32 @uksub16(i32 %a, i32 %b) { +; CHECK-LABEL: uksub16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uksub16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uksub16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.uksub16.i32(i32, i32) + +define i32 @uksubh(i32 %a, i32 %b) { +; CHECK-LABEL: uksubh: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uksubh a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uksubh.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.uksubh.i32(i32, i32) + +define i32 @uksubw(i32 %a, i32 %b) { +; CHECK-LABEL: uksubw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uksubw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uksubw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.uksubw.i32(i32, i32) + +define i32 @umaqa(i32 %t, i32 %a, i32 %b) { +; CHECK-LABEL: umaqa: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: umaqa a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.umaqa.i32.i32(i32 %t, i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.umaqa.i32.i32(i32, i32, i32) + +define i32 @umax8(i32 %a, i32 %b) { +; CHECK-LABEL: umax8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: umax8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.umax8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.umax8.i32(i32, i32) + +define i32 @umax16(i32 %a, i32 %b) { +; CHECK-LABEL: umax16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: umax16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.umax16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.umax16.i32(i32, i32) + +define i32 @umin8(i32 %a, i32 %b) { +; CHECK-LABEL: umin8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: umin8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.umin8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.umin8.i32(i32, i32) + +define i32 @umin16(i32 %a, i32 %b) { +; CHECK-LABEL: umin16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: umin16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.umin16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.umin16.i32(i32, i32) + +define i32 @uradd8(i32 %a, i32 %b) { +; CHECK-LABEL: uradd8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uradd8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = 
tail call i32 @llvm.riscv.uradd8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.uradd8.i32(i32, i32) + +define i32 @uradd16(i32 %a, i32 %b) { +; CHECK-LABEL: uradd16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uradd16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uradd16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.uradd16.i32(i32, i32) + +define i32 @uraddw(i32 %a, i32 %b) { +; CHECK-LABEL: uraddw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: uraddw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.uraddw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.uraddw.i32(i32, i32) + +define i32 @urcras16(i32 %a, i32 %b) { +; CHECK-LABEL: urcras16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: urcras16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.urcras16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.urcras16.i32(i32, i32) + +define i32 @urcrsa16(i32 %a, i32 %b) { +; CHECK-LABEL: urcrsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: urcrsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.urcrsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.urcrsa16.i32(i32, i32) + +define i32 @urstas16(i32 %a, i32 %b) { +; CHECK-LABEL: urstas16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: urstas16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.urstas16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.urstas16.i32(i32, i32) + +define i32 @urstsa16(i32 %a, i32 %b) { +; CHECK-LABEL: urstsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: urstsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.urstsa16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.urstsa16.i32(i32, i32) + +define i32 @ursub8(i32 %a, i32 %b) { +; CHECK-LABEL: ursub8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ursub8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ursub8.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ursub8.i32(i32, i32) + +define i32 @ursub16(i32 %a, i32 %b) { +; CHECK-LABEL: ursub16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ursub16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ursub16.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ursub16.i32(i32, i32) + +define i32 @ursubw(i32 %a, i32 %b) { +; CHECK-LABEL: ursubw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ursubw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.ursubw.i32(i32 %a, i32 %b) + ret i32 %0 +} + +declare i32 @llvm.riscv.ursubw.i32(i32, i32) + +define i32 @zunpkd810(i32 %a) { +; CHECK-LABEL: zunpkd810: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: zunpkd810 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.zunpkd810.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.zunpkd810.i32(i32) + +define i32 @zunpkd820(i32 %a) { +; CHECK-LABEL: zunpkd820: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: zunpkd820 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.zunpkd820.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.zunpkd820.i32(i32) + +define i32 @zunpkd830(i32 %a) { +; CHECK-LABEL: zunpkd830: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: zunpkd830 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.zunpkd830.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.zunpkd830.i32(i32) + +define i32 @zunpkd831(i32 %a) { +; CHECK-LABEL: zunpkd831: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
zunpkd831 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.zunpkd831.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.zunpkd831.i32(i32) + +define i32 @zunpkd832(i32 %a) { +; CHECK-LABEL: zunpkd832: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: zunpkd832 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i32 @llvm.riscv.zunpkd832.i32(i32 %a) + ret i32 %0 +} + +declare i32 @llvm.riscv.zunpkd832.i32(i32) diff --git a/llvm/test/CodeGen/RISCV/rv64zpn-intrinsic.ll b/llvm/test/CodeGen/RISCV/rv64zpn-intrinsic.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rv64zpn-intrinsic.ll @@ -0,0 +1,2391 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-p -verify-machineinstrs < %s \ +; RUN: | FileCheck %s + +define i64 @add8(i64 %a, i64 %b) { +; CHECK-LABEL: add8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: add8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.add8.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.add8.i64(i64, i64) + +define i64 @add16(i64 %a, i64 %b) { +; CHECK-LABEL: add16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: add16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.add16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.add16.i64(i64, i64) + +define i64 @ave(i64 %a, i64 %b) { +; CHECK-LABEL: ave: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ave a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.ave.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.ave.i64(i64, i64) + +define i64 @bitrev(i64 %a, i64 %b) { +; CHECK-LABEL: bitrev: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: bitrev a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.bitrev.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.bitrev.i64(i64, i64) + +define i64 @bpick(i64 %a, i64 %b, i64 %c) { +; CHECK-LABEL: bpick: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: bpick a0, a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.bpick.i64(i64 %a, i64 %b, i64 %c) + ret i64 %0 +} + +declare i64 @llvm.riscv.bpick.i64(i64, i64, i64) + +define i64 @clrs8(i64 %a) { +; CHECK-LABEL: clrs8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clrs8 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.clrs8.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.clrs8.i64(i64) + +define i64 @clrs16(i64 %a) { +; CHECK-LABEL: clrs16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clrs16 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.clrs16.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.clrs16.i64(i64) + +define i64 @clrs32(i64 %a) { +; CHECK-LABEL: clrs32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clrs32 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.clrs32.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.clrs32.i64(i64) + +define i64 @clz8(i64 %a) { +; CHECK-LABEL: clz8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clz8 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.clz8.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.clz8.i64(i64) + +define i64 @clz16(i64 %a) { +; CHECK-LABEL: clz16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clz16 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.clz16.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.clz16.i64(i64) + +define i64 @clz32(i64 %a) { +; CHECK-LABEL: clz32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: clz32 a0, a0 +; CHECK-NEXT: ret 
+entry: + %0 = tail call i64 @llvm.riscv.clz32.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.clz32.i64(i64) + +define i64 @cmpeq8(i64 %a, i64 %b) { +; CHECK-LABEL: cmpeq8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: cmpeq8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.cmpeq8.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.cmpeq8.i64(i64, i64) + +define i64 @cmpeq16(i64 %a, i64 %b) { +; CHECK-LABEL: cmpeq16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: cmpeq16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.cmpeq16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.cmpeq16.i64(i64, i64) + +define i64 @cras16(i64 %a, i64 %b) { +; CHECK-LABEL: cras16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: cras16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.cras16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.cras16.i64(i64, i64) + +define i64 @crsa16(i64 %a, i64 %b) { +; CHECK-LABEL: crsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: crsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.crsa16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.crsa16.i64(i64, i64) + +define i64 @insb(i64 %a, i64 %b) { +; CHECK-LABEL: insb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: insb a0, a1, 5 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.insb.i64(i64 %a, i64 %b, i64 5) + ret i64 %0 +} + +declare i64 @llvm.riscv.insb.i64(i64, i64, i64) + +define i64 @kabs8(i64 %a) { +; CHECK-LABEL: kabs8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kabs8 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kabs8.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.kabs8.i64(i64) + +define i64 @kabs16(i64 %a) { +; CHECK-LABEL: kabs16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kabs16 a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kabs16.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.kabs16.i64(i64) + +define i64 @kabsw(i64 %a) { +; CHECK-LABEL: kabsw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kabsw a0, a0 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kabsw.i64(i64 %a) + ret i64 %0 +} + +declare i64 @llvm.riscv.kabsw.i64(i64) + +define i64 @kadd8(i64 %a, i64 %b) { +; CHECK-LABEL: kadd8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kadd8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kadd8.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kadd8.i64(i64, i64) + +define i64 @kadd16(i64 %a, i64 %b) { +; CHECK-LABEL: kadd16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kadd16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kadd16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kadd16.i64(i64, i64) + +define i64 @kaddh(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kaddh: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kaddh a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kaddh.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kaddh.i64(i64, i64) + +define i64 @kaddw(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kaddw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kaddw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kaddw.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kaddw.i64(i64, i64) + +define i64 @kcras16(i64 %a, i64 %b) { +; 
CHECK-LABEL: kcras16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kcras16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kcras16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kcras16.i64(i64, i64) + +define i64 @kcrsa16(i64 %a, i64 %b) { +; CHECK-LABEL: kcrsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kcrsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kcrsa16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kcrsa16.i64(i64, i64) + +define i64 @kdmbb(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kdmbb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a0, a0, 32 +; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: kdmbb a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kdmbb.i64.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kdmbb.i64.i64(i64, i64) + +define i64 @kdmbt(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kdmbt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a0, a0, 32 +; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: kdmbt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kdmbt.i64.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kdmbt.i64.i64(i64, i64) + +define i64 @kdmtt(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kdmtt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a0, a0, 32 +; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: kdmtt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kdmtt.i64.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kdmtt.i64.i64(i64, i64) + +define i64 @kdmabb(i64 %t, i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kdmabb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: slli a2, a2, 32 +; CHECK-NEXT: srli a2, a2, 32 +; CHECK-NEXT: kdmabb a0, a1, a2 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kdmabb.i64.i64(i64 %t, i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kdmabb.i64.i64(i64, i64, i64) + +define i64 @kdmabt(i64 %t, i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kdmabt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: slli a2, a2, 32 +; CHECK-NEXT: srli a2, a2, 32 +; CHECK-NEXT: kdmabt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kdmabt.i64.i64(i64 %t, i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kdmabt.i64.i64(i64, i64, i64) + +define i64 @kdmatt(i64 %t, i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kdmatt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: slli a2, a2, 32 +; CHECK-NEXT: srli a2, a2, 32 +; CHECK-NEXT: kdmatt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kdmatt.i64.i64(i64 %t, i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kdmatt.i64.i64(i64, i64, i64) + +define i64 @khm8(i64 %a, i64 %b) { +; 
CHECK-LABEL: khm8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khm8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.khm8.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.khm8.i64(i64, i64) + +define i64 @khmx8(i64 %a, i64 %b) { +; CHECK-LABEL: khmx8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khmx8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.khmx8.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.khmx8.i64(i64, i64) + +define i64 @khm16(i64 %a, i64 %b) { +; CHECK-LABEL: khm16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khm16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.khm16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.khm16.i64(i64, i64) + +define i64 @khmx16(i64 %a, i64 %b) { +; CHECK-LABEL: khmx16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: khmx16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.khmx16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.khmx16.i64(i64, i64) + +define i64 @khmbb(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: khmbb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a0, a0, 32 +; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: khmbb a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.khmbb.i64.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.khmbb.i64.i64(i64, i64) + +define i64 @khmbt(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: khmbt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a0, a0, 32 +; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: khmbt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.khmbt.i64.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.khmbt.i64.i64(i64, i64) + +define i64 @khmtt(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: khmtt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a0, a0, 32 +; CHECK-NEXT: srli a0, a0, 32 +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: khmtt a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %a to i64 + %conv1 = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.khmtt.i64.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.khmtt.i64.i64(i64, i64) + +define i64 @kmabb(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmabb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmabb a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmabb.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmabb.i64.i64(i64, i64, i64) + +define i64 @kmabt(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmabt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmabt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmabt.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmabt.i64.i64(i64, i64, i64) + +define i64 @kmatt(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmatt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmatt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmatt.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmatt.i64.i64(i64, i64, i64) + +define i64 @kmada(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmada: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmada a0, a1, a2 +; CHECK-NEXT: ret +entry: 
+ %0 = tail call i64 @llvm.riscv.kmada.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmada.i64.i64(i64, i64, i64) + +define i64 @kmaxda(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmaxda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmaxda a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmaxda.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmaxda.i64.i64(i64, i64, i64) + +define i64 @kmads(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmads: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmads a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmads.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmads.i64.i64(i64, i64, i64) + +define i64 @kmadrs(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmadrs: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmadrs a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmadrs.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmadrs.i64.i64(i64, i64, i64) + +define i64 @kmaxds(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmaxds: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmaxds a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmaxds.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmaxds.i64.i64(i64, i64, i64) + +define i64 @kmda(i64 %a, i64 %b) { +; CHECK-LABEL: kmda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmda a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmda.i64.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmda.i64.i64(i64, i64) + +define i64 @kmxda(i64 %a, i64 %b) { +; CHECK-LABEL: kmxda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmxda a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmxda.i64.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmxda.i64.i64(i64, i64) + +define i64 @kmmac(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmac: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmac a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmac.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmac.i64(i64, i64, i64) + +define i64 @kmmac_u(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmac_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmac.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmac.u.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmac.u.i64(i64, i64, i64) + +define i64 @kmmawb(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmawb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawb a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmawb.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmawb.i64.i64(i64, i64, i64) + +define i64 @kmmawb_u(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmawb_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawb.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmawb.u.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmawb.u.i64.i64(i64, i64, i64) + +define i64 @kmmawb2(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmawb2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawb2 a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmawb2.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmawb2.i64.i64(i64, i64, i64) + +define i64 @kmmawb2_u(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmawb2_u: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: kmmawb2.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmawb2.u.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmawb2.u.i64.i64(i64, i64, i64) + +define i64 @kmmawt(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmawt: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawt a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmawt.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmawt.i64.i64(i64, i64, i64) + +define i64 @kmmawt_u(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmawt_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawt.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmawt.u.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmawt.u.i64.i64(i64, i64, i64) + +define i64 @kmmawt2(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmawt2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawt2 a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmawt2.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmawt2.i64.i64(i64, i64, i64) + +define i64 @kmmawt2_u(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmawt2_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmawt2.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmawt2.u.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmawt2.u.i64.i64(i64, i64, i64) + +define i64 @kmmsb(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmsb: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmsb a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmsb.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmsb.i64(i64, i64, i64) + +define i64 @kmmsb_u(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmmsb_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmsb.u a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmsb.u.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmsb.u.i64(i64, i64, i64) + +define i64 @kmmwb2(i64 %a, i64 %b) { +; CHECK-LABEL: kmmwb2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmwb2 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmwb2.i64.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmwb2.i64.i64(i64, i64) + +define i64 @kmmwb2_u(i64 %a, i64 %b) { +; CHECK-LABEL: kmmwb2_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmwb2.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmwb2.u.i64.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmwb2.u.i64.i64(i64, i64) + +define i64 @kmmwt2(i64 %a, i64 %b) { +; CHECK-LABEL: kmmwt2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmwt2 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmwt2.i64.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmwt2.i64.i64(i64, i64) + +define i64 @kmmwt2_u(i64 %a, i64 %b) { +; CHECK-LABEL: kmmwt2_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmmwt2.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmmwt2.u.i64.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmmwt2.u.i64.i64(i64, i64) + +define i64 @kmsda(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: kmsda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmsda a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmsda.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmsda.i64.i64(i64, i64, i64) + +define i64 @kmsxda(i64 %t, i64 %a, i64 
%b) { +; CHECK-LABEL: kmsxda: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kmsxda a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kmsxda.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kmsxda.i64.i64(i64, i64, i64) + +define i64 @ksllw(i64 %a, i32 signext %b) { +; CHECK-LABEL: ksllw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: ksllw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.ksllw.i64(i64 %a, i64 %conv) + ret i64 %0 +} + +declare i64 @llvm.riscv.ksllw.i64(i64, i64) + +define i64 @ksll8(i64 %a, i32 signext %b) { +; CHECK-LABEL: ksll8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: ksll8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.ksll8.i64.i64(i64 %a, i64 %conv) + ret i64 %0 +} + +declare i64 @llvm.riscv.ksll8.i64.i64(i64, i64) + +define i64 @ksll16(i64 %a, i32 signext %b) { +; CHECK-LABEL: ksll16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: slli a1, a1, 32 +; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: ksll16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = zext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.ksll16.i64.i64(i64 %a, i64 %conv) + ret i64 %0 +} + +declare i64 @llvm.riscv.ksll16.i64.i64(i64, i64) + +define i64 @kslra8(i64 %a, i32 signext %b) { +; CHECK-LABEL: kslra8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslra8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kslra8.i64.i64(i64 %a, i64 %conv) + ret i64 %0 +} + +declare i64 @llvm.riscv.kslra8.i64.i64(i64, i64) + +define i64 @kslra8_u(i64 %a, i32 signext %b) { +; CHECK-LABEL: kslra8_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslra8.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kslra8.u.i64.i64(i64 %a, i64 %conv) + ret i64 %0 +} + +declare i64 @llvm.riscv.kslra8.u.i64.i64(i64, i64) + +define i64 @kslra16(i64 %a, i32 signext %b) { +; CHECK-LABEL: kslra16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslra16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kslra16.i64.i64(i64 %a, i64 %conv) + ret i64 %0 +} + +declare i64 @llvm.riscv.kslra16.i64.i64(i64, i64) + +define i64 @kslra16_u(i64 %a, i32 signext %b) { +; CHECK-LABEL: kslra16_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslra16.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kslra16.u.i64.i64(i64 %a, i64 %conv) + ret i64 %0 +} + +declare i64 @llvm.riscv.kslra16.u.i64.i64(i64, i64) + +define i64 @kslraw(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kslraw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslraw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kslraw.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kslraw.i64(i64, i64) + +define i64 @kslraw_u(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: kslraw_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kslraw.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.kslraw.u.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.kslraw.u.i64(i64, i64) + +define i64 @kstas16(i64 %a, i64 %b) { +; CHECK-LABEL: kstas16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: kstas16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kstas16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kstas16.i64(i64, i64) + +define i64 @kstsa16(i64 %a, i64 %b) { +; CHECK-LABEL: kstsa16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kstsa16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kstsa16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kstsa16.i64(i64, i64) + +define i64 @ksub8(i64 %a, i64 %b) { +; CHECK-LABEL: ksub8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksub8 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.ksub8.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.ksub8.i64(i64, i64) + +define i64 @ksub16(i64 %a, i64 %b) { +; CHECK-LABEL: ksub16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksub16 a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.ksub16.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.ksub16.i64(i64, i64) + +define i64 @ksubh(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: ksubh: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksubh a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.ksubh.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.ksubh.i64(i64, i64) + +define i64 @ksubw(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: ksubw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ksubw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.ksubw.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.ksubw.i64(i64, i64) + +define i64 @kwmmul(i64 %a, i64 %b) { +; CHECK-LABEL: kwmmul: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kwmmul a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kwmmul.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kwmmul.i64(i64, i64) + +define i64 @kwmmul_u(i64 %a, i64 %b) { +; CHECK-LABEL: kwmmul_u: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: kwmmul.u a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.kwmmul.u.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.kwmmul.u.i64(i64, i64) + +define i64 @maxw(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: maxw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: maxw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.maxw.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.maxw.i64(i64, i64) + +define i64 @minw(i32 signext %a, i32 signext %b) { +; CHECK-LABEL: minw: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: minw a0, a0, a1 +; CHECK-NEXT: ret +entry: + %conv = sext i32 %a to i64 + %conv1 = sext i32 %b to i64 + %0 = tail call i64 @llvm.riscv.minw.i64(i64 %conv, i64 %conv1) + ret i64 %0 +} + +declare i64 @llvm.riscv.minw.i64(i64, i64) + +define i64 @pbsad(i64 %a, i64 %b) { +; CHECK-LABEL: pbsad: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pbsad a0, a0, a1 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.pbsad.i64.i64(i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.pbsad.i64.i64(i64, i64) + +define i64 @pbsada(i64 %t, i64 %a, i64 %b) { +; CHECK-LABEL: pbsada: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: pbsada a0, a1, a2 +; CHECK-NEXT: ret +entry: + %0 = tail call i64 @llvm.riscv.pbsada.i64.i64(i64 %t, i64 %a, i64 %b) + ret i64 %0 +} + +declare i64 @llvm.riscv.pbsada.i64.i64(i64, i64, 
+define i64 @pkbb16(i64 %a, i64 %b) {
+; CHECK-LABEL: pkbb16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pkbb16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.pkbb16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.pkbb16.i64(i64, i64)
+
+define i64 @pkbt16(i64 %a, i64 %b) {
+; CHECK-LABEL: pkbt16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pkbt16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.pkbt16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.pkbt16.i64(i64, i64)
+
+define i64 @pktt16(i64 %a, i64 %b) {
+; CHECK-LABEL: pktt16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pktt16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.pktt16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.pktt16.i64(i64, i64)
+
+define i64 @pktb16(i64 %a, i64 %b) {
+; CHECK-LABEL: pktb16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pktb16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.pktb16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.pktb16.i64(i64, i64)
+
+define i64 @radd8(i64 %a, i64 %b) {
+; CHECK-LABEL: radd8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: radd8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.radd8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.radd8.i64(i64, i64)
+
+define i64 @radd16(i64 %a, i64 %b) {
+; CHECK-LABEL: radd16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: radd16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.radd16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.radd16.i64(i64, i64)
+
+define i64 @raddw(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: raddw:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: raddw a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = sext i32 %a to i64
+  %conv1 = sext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.raddw.i64(i64 %conv, i64 %conv1)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.raddw.i64(i64, i64)
+
+define i64 @rcras16(i64 %a, i64 %b) {
+; CHECK-LABEL: rcras16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rcras16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.rcras16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.rcras16.i64(i64, i64)
+
+define i64 @rcrsa16(i64 %a, i64 %b) {
+; CHECK-LABEL: rcrsa16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rcrsa16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.rcrsa16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.rcrsa16.i64(i64, i64)
+
+define i64 @rstas16(i64 %a, i64 %b) {
+; CHECK-LABEL: rstas16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rstas16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.rstas16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.rstas16.i64(i64, i64)
+
+define i64 @rstsa16(i64 %a, i64 %b) {
+; CHECK-LABEL: rstsa16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rstsa16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.rstsa16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.rstsa16.i64(i64, i64)
+
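+; rsub8/rsub16 are the signed halving subtracts: each lane computes
+; (a - b) >> 1 without overflow, e.g. 0x7fff - 0x8001 in a 16-bit lane
+; yields 0x7fff (semantics per the draft P-extension spec, given here
+; only for context).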
+define i64 @rsub8(i64 %a, i64 %b) {
+; CHECK-LABEL: rsub8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rsub8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.rsub8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.rsub8.i64(i64, i64)
+
+define i64 @rsub16(i64 %a, i64 %b) {
+; CHECK-LABEL: rsub16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rsub16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.rsub16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.rsub16.i64(i64, i64)
+
+define i64 @rsubw(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: rsubw:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: rsubw a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = sext i32 %a to i64
+  %conv1 = sext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.rsubw.i64(i64 %conv, i64 %conv1)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.rsubw.i64(i64, i64)
+
+define i64 @sclip8(i64 %a) {
+; CHECK-LABEL: sclip8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sclip8 a0, a0, 7
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sclip8.i64.i64(i64 %a, i64 7)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sclip8.i64.i64(i64, i64)
+
+define i64 @sclip16(i64 %a) {
+; CHECK-LABEL: sclip16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sclip16 a0, a0, 8
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sclip16.i64.i64(i64 %a, i64 8)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sclip16.i64.i64(i64, i64)
+
+define i64 @sclip32(i64 %a) {
+; CHECK-LABEL: sclip32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sclip32 a0, a0, 9
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sclip32.i64.i64(i64 %a, i64 9)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sclip32.i64.i64(i64, i64)
+
+define i64 @scmple8(i64 %a, i64 %b) {
+; CHECK-LABEL: scmple8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: scmple8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.scmple8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.scmple8.i64(i64, i64)
+
+define i64 @scmple16(i64 %a, i64 %b) {
+; CHECK-LABEL: scmple16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: scmple16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.scmple16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.scmple16.i64(i64, i64)
+
+define i64 @scmplt8(i64 %a, i64 %b) {
+; CHECK-LABEL: scmplt8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: scmplt8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.scmplt8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.scmplt8.i64(i64, i64)
+
+define i64 @scmplt16(i64 %a, i64 %b) {
+; CHECK-LABEL: scmplt16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: scmplt16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.scmplt16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.scmplt16.i64(i64, i64)
+
+define i64 @sll8(i64 %a, i32 signext %b) {
+; CHECK-LABEL: sll8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: sll8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.sll8.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sll8.i64.i64(i64, i64)
+
+define i64 @sll16(i64 %a, i32 signext %b) {
+; CHECK-LABEL: sll16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: sll16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.sll16.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sll16.i64.i64(i64, i64)
+
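+; smaqa ("signed multiply and add quad") multiplies the four pairs of
+; signed 8-bit lanes in rs1 and rs2 and accumulates all four products
+; into each 32-bit word of rd (draft-spec semantics; the tests below
+; only verify that the intrinsics select the instructions).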
+define i64 @smaqa(i64 %t, i64 %a, i64 %b) {
+; CHECK-LABEL: smaqa:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smaqa a0, a1, a2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smaqa.i64.i64(i64 %t, i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smaqa.i64.i64(i64, i64, i64)
+
+define i64 @smaqa_su(i64 %t, i64 %a, i64 %b) {
+; CHECK-LABEL: smaqa_su:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smaqa.su a0, a1, a2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smaqa.su.i64.i64(i64 %t, i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smaqa.su.i64.i64(i64, i64, i64)
+
+define i64 @smax8(i64 %a, i64 %b) {
+; CHECK-LABEL: smax8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smax8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smax8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smax8.i64(i64, i64)
+
+define i64 @smax16(i64 %a, i64 %b) {
+; CHECK-LABEL: smax16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smax16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smax16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smax16.i64(i64, i64)
+
+define i64 @smbb16(i64 %a, i64 %b) {
+; CHECK-LABEL: smbb16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smbb16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smbb16.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smbb16.i64.i64(i64, i64)
+
+define i64 @smbt16(i64 %a, i64 %b) {
+; CHECK-LABEL: smbt16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smbt16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smbt16.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smbt16.i64.i64(i64, i64)
+
+define i64 @smtt16(i64 %a, i64 %b) {
+; CHECK-LABEL: smtt16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smtt16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smtt16.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smtt16.i64.i64(i64, i64)
+
+define i64 @smds(i64 %a, i64 %b) {
+; CHECK-LABEL: smds:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smds a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smds.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smds.i64.i64(i64, i64)
+
+define i64 @smdrs(i64 %a, i64 %b) {
+; CHECK-LABEL: smdrs:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smdrs a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smdrs.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smdrs.i64.i64(i64, i64)
+
+define i64 @smxds(i64 %a, i64 %b) {
+; CHECK-LABEL: smxds:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smxds a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smxds.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smxds.i64.i64(i64, i64)
+
+define i64 @smin8(i64 %a, i64 %b) {
+; CHECK-LABEL: smin8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smin8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smin8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smin8.i64(i64, i64)
+
+define i64 @smin16(i64 %a, i64 %b) {
+; CHECK-LABEL: smin16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smin16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smin16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smin16.i64(i64, i64)
+
+define i64 @smmul(i64 %a, i64 %b) {
+; CHECK-LABEL: smmul:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smmul a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smmul.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smmul.i64(i64, i64)
+
+define i64 @smmul_u(i64 %a, i64 %b) {
+; CHECK-LABEL: smmul_u:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smmul.u a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smmul.u.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smmul.u.i64(i64, i64)
+
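+; The smmw* tests cover the MSW word-by-halfword multiplies: smmwb/smmwt
+; multiply each 32-bit word of rs1 by the bottom/top half of the
+; corresponding word of rs2 and keep the most significant 32 bits; the .u
+; forms round rather than truncate (assumed draft-spec behaviour).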
+define i64 @smmwb(i64 %a, i64 %b) {
+; CHECK-LABEL: smmwb:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smmwb a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smmwb.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smmwb.i64.i64(i64, i64)
+
+define i64 @smmwb_u(i64 %a, i64 %b) {
+; CHECK-LABEL: smmwb_u:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smmwb.u a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smmwb.u.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smmwb.u.i64.i64(i64, i64)
+
+define i64 @smmwt(i64 %a, i64 %b) {
+; CHECK-LABEL: smmwt:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smmwt a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smmwt.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smmwt.i64.i64(i64, i64)
+
+define i64 @smmwt_u(i64 %a, i64 %b) {
+; CHECK-LABEL: smmwt_u:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: smmwt.u a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.smmwt.u.i64.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.smmwt.u.i64.i64(i64, i64)
+
+define i64 @sra_u(i64 %a, i32 signext %b) {
+; CHECK-LABEL: sra_u:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: sra.u a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.sra.u.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sra.u.i64.i64(i64, i64)
+
+define i64 @sra8(i64 %a, i32 signext %b) {
+; CHECK-LABEL: sra8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: sra8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.sra8.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sra8.i64.i64(i64, i64)
+
+define i64 @sra8_u(i64 %a, i32 signext %b) {
+; CHECK-LABEL: sra8_u:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: sra8.u a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.sra8.u.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sra8.u.i64.i64(i64, i64)
+
+define i64 @sra16(i64 %a, i32 signext %b) {
+; CHECK-LABEL: sra16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: sra16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.sra16.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sra16.i64.i64(i64, i64)
+
+define i64 @sra16_u(i64 %a, i32 signext %b) {
+; CHECK-LABEL: sra16_u:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: sra16.u a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.sra16.u.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sra16.u.i64.i64(i64, i64)
+
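+; srl8/srl16 shift each 8-/16-bit lane right logically by the amount in
+; rs2, and the .u variants round by adding in the last bit shifted out
+; (assumed draft-spec behaviour, recorded for context only).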
+define i64 @srl8(i64 %a, i32 signext %b) {
+; CHECK-LABEL: srl8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: srl8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.srl8.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.srl8.i64.i64(i64, i64)
+
+define i64 @srl8_u(i64 %a, i32 signext %b) {
+; CHECK-LABEL: srl8_u:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: srl8.u a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.srl8.u.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.srl8.u.i64.i64(i64, i64)
+
+define i64 @srl16(i64 %a, i32 signext %b) {
+; CHECK-LABEL: srl16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: srl16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.srl16.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.srl16.i64.i64(i64, i64)
+
+define i64 @srl16_u(i64 %a, i32 signext %b) {
+; CHECK-LABEL: srl16_u:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: srl16.u a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.srl16.u.i64.i64(i64 %a, i64 %conv)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.srl16.u.i64.i64(i64, i64)
+
+define i64 @stas16(i64 %a, i64 %b) {
+; CHECK-LABEL: stas16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: stas16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.stas16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.stas16.i64(i64, i64)
+
+define i64 @stsa16(i64 %a, i64 %b) {
+; CHECK-LABEL: stsa16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: stsa16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.stsa16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.stsa16.i64(i64, i64)
+
+define i64 @sub8(i64 %a, i64 %b) {
+; CHECK-LABEL: sub8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sub8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sub8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sub8.i64(i64, i64)
+
+define i64 @sub16(i64 %a, i64 %b) {
+; CHECK-LABEL: sub16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sub16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sub16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sub16.i64(i64, i64)
+
+define i64 @sunpkd810(i64 %a) {
+; CHECK-LABEL: sunpkd810:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sunpkd810 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sunpkd810.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sunpkd810.i64(i64)
+
+define i64 @sunpkd820(i64 %a) {
+; CHECK-LABEL: sunpkd820:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sunpkd820 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sunpkd820.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sunpkd820.i64(i64)
+
+define i64 @sunpkd830(i64 %a) {
+; CHECK-LABEL: sunpkd830:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sunpkd830 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sunpkd830.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sunpkd830.i64(i64)
+
+define i64 @sunpkd831(i64 %a) {
+; CHECK-LABEL: sunpkd831:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sunpkd831 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sunpkd831.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sunpkd831.i64(i64)
+
+define i64 @sunpkd832(i64 %a) {
+; CHECK-LABEL: sunpkd832:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: sunpkd832 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.sunpkd832.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.sunpkd832.i64(i64)
+
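+; swap8 swaps the two bytes within each 16-bit half and swap16 swaps the
+; two halfwords within each 32-bit word (assumed draft-spec behaviour).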
+define i64 @swap8(i64 %a) {
+; CHECK-LABEL: swap8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: swap8 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.swap8.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.swap8.i64(i64)
+
+define i64 @swap16(i64 %a) {
+; CHECK-LABEL: swap16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: swap16 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.swap16.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.swap16.i64(i64)
+
+define i64 @uclip8(i64 %a) {
+; CHECK-LABEL: uclip8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: uclip8 a0, a0, 7
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.uclip8.i64.i64(i64 %a, i64 7)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uclip8.i64.i64(i64, i64)
+
+define i64 @uclip16(i64 %a) {
+; CHECK-LABEL: uclip16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: uclip16 a0, a0, 8
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.uclip16.i64.i64(i64 %a, i64 8)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uclip16.i64.i64(i64, i64)
+
+define i64 @uclip32(i64 %a) {
+; CHECK-LABEL: uclip32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: uclip32 a0, a0, 9
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.uclip32.i64.i64(i64 %a, i64 9)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uclip32.i64.i64(i64, i64)
+
+define i64 @ucmple8(i64 %a, i64 %b) {
+; CHECK-LABEL: ucmple8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ucmple8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ucmple8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ucmple8.i64(i64, i64)
+
+define i64 @ucmple16(i64 %a, i64 %b) {
+; CHECK-LABEL: ucmple16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ucmple16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ucmple16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ucmple16.i64(i64, i64)
+
+define i64 @ucmplt8(i64 %a, i64 %b) {
+; CHECK-LABEL: ucmplt8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ucmplt8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ucmplt8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ucmplt8.i64(i64, i64)
+
+define i64 @ucmplt16(i64 %a, i64 %b) {
+; CHECK-LABEL: ucmplt16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ucmplt16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ucmplt16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ucmplt16.i64(i64, i64)
+
+define i64 @ukadd8(i64 %a, i64 %b) {
+; CHECK-LABEL: ukadd8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ukadd8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ukadd8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ukadd8.i64(i64, i64)
+
+define i64 @ukadd16(i64 %a, i64 %b) {
+; CHECK-LABEL: ukadd16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ukadd16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ukadd16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ukadd16.i64(i64, i64)
+
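+; Note that ukaddh/ukaddw receive their i32 arguments sign-extended, as
+; RV64 passes i32 values that way, so no explicit extension code should
+; be emitted here, unlike the uksubh/uksubw tests further down.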
+define i64 @ukaddh(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: ukaddh:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ukaddh a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = sext i32 %a to i64
+  %conv1 = sext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.ukaddh.i64(i64 %conv, i64 %conv1)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ukaddh.i64(i64, i64)
+
+define i64 @ukaddw(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: ukaddw:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ukaddw a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = sext i32 %a to i64
+  %conv1 = sext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.ukaddw.i64(i64 %conv, i64 %conv1)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ukaddw.i64(i64, i64)
+
+define i64 @ukcras16(i64 %a, i64 %b) {
+; CHECK-LABEL: ukcras16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ukcras16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ukcras16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ukcras16.i64(i64, i64)
+
+define i64 @ukcrsa16(i64 %a, i64 %b) {
+; CHECK-LABEL: ukcrsa16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ukcrsa16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ukcrsa16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ukcrsa16.i64(i64, i64)
+
+define i64 @ukstas16(i64 %a, i64 %b) {
+; CHECK-LABEL: ukstas16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ukstas16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ukstas16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ukstas16.i64(i64, i64)
+
+define i64 @ukstsa16(i64 %a, i64 %b) {
+; CHECK-LABEL: ukstsa16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ukstsa16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ukstsa16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ukstsa16.i64(i64, i64)
+
+define i64 @uksub8(i64 %a, i64 %b) {
+; CHECK-LABEL: uksub8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: uksub8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.uksub8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uksub8.i64(i64, i64)
+
+define i64 @uksub16(i64 %a, i64 %b) {
+; CHECK-LABEL: uksub16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: uksub16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.uksub16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uksub16.i64(i64, i64)
+
+define i64 @uksubh(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: uksubh:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a0, a0, 32
+; CHECK-NEXT: srli a0, a0, 32
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: uksubh a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %a to i64
+  %conv1 = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.uksubh.i64(i64 %conv, i64 %conv1)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uksubh.i64(i64, i64)
+
+define i64 @uksubw(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: uksubw:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a0, a0, 32
+; CHECK-NEXT: srli a0, a0, 32
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: uksubw a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %a to i64
+  %conv1 = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.uksubw.i64(i64 %conv, i64 %conv1)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uksubw.i64(i64, i64)
+
+define i64 @umaqa(i64 %t, i64 %a, i64 %b) {
+; CHECK-LABEL: umaqa:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: umaqa a0, a1, a2
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.umaqa.i64.i64(i64 %t, i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.umaqa.i64.i64(i64, i64, i64)
+
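+; umax8/umax16 and umin8/umin16 pick the per-lane unsigned maximum or
+; minimum of the 8-/16-bit elements (draft-spec semantics, for context).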
+define i64 @umax8(i64 %a, i64 %b) {
+; CHECK-LABEL: umax8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: umax8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.umax8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.umax8.i64(i64, i64)
+
+define i64 @umax16(i64 %a, i64 %b) {
+; CHECK-LABEL: umax16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: umax16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.umax16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.umax16.i64(i64, i64)
+
+define i64 @umin8(i64 %a, i64 %b) {
+; CHECK-LABEL: umin8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: umin8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.umin8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.umin8.i64(i64, i64)
+
+define i64 @umin16(i64 %a, i64 %b) {
+; CHECK-LABEL: umin16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: umin16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.umin16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.umin16.i64(i64, i64)
+
+define i64 @uradd8(i64 %a, i64 %b) {
+; CHECK-LABEL: uradd8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: uradd8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.uradd8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uradd8.i64(i64, i64)
+
+define i64 @uradd16(i64 %a, i64 %b) {
+; CHECK-LABEL: uradd16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: uradd16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.uradd16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uradd16.i64(i64, i64)
+
+define i64 @uraddw(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: uraddw:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a0, a0, 32
+; CHECK-NEXT: srli a0, a0, 32
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: uraddw a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %a to i64
+  %conv1 = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.uraddw.i64(i64 %conv, i64 %conv1)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.uraddw.i64(i64, i64)
+
+define i64 @urcras16(i64 %a, i64 %b) {
+; CHECK-LABEL: urcras16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: urcras16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.urcras16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.urcras16.i64(i64, i64)
+
+define i64 @urcrsa16(i64 %a, i64 %b) {
+; CHECK-LABEL: urcrsa16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: urcrsa16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.urcrsa16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.urcrsa16.i64(i64, i64)
+
+define i64 @urstas16(i64 %a, i64 %b) {
+; CHECK-LABEL: urstas16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: urstas16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.urstas16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.urstas16.i64(i64, i64)
+
+define i64 @urstsa16(i64 %a, i64 %b) {
+; CHECK-LABEL: urstsa16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: urstsa16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.urstsa16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.urstsa16.i64(i64, i64)
+
+define i64 @ursub8(i64 %a, i64 %b) {
+; CHECK-LABEL: ursub8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ursub8 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ursub8.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ursub8.i64(i64, i64)
+
+define i64 @ursub16(i64 %a, i64 %b) {
+; CHECK-LABEL: ursub16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: ursub16 a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.ursub16.i64(i64 %a, i64 %b)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ursub16.i64(i64, i64)
+
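+; For ursubw the i32 arguments must be zero-extended before the i64
+; intrinsic call; the slli/srli pairs in the CHECK lines below are that
+; zero-extension.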
+define i64 @ursubw(i32 signext %a, i32 signext %b) {
+; CHECK-LABEL: ursubw:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: slli a0, a0, 32
+; CHECK-NEXT: srli a0, a0, 32
+; CHECK-NEXT: slli a1, a1, 32
+; CHECK-NEXT: srli a1, a1, 32
+; CHECK-NEXT: ursubw a0, a0, a1
+; CHECK-NEXT: ret
+entry:
+  %conv = zext i32 %a to i64
+  %conv1 = zext i32 %b to i64
+  %0 = tail call i64 @llvm.riscv.ursubw.i64(i64 %conv, i64 %conv1)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.ursubw.i64(i64, i64)
+
+define i64 @zunpkd810(i64 %a) {
+; CHECK-LABEL: zunpkd810:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: zunpkd810 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.zunpkd810.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.zunpkd810.i64(i64)
+
+define i64 @zunpkd820(i64 %a) {
+; CHECK-LABEL: zunpkd820:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: zunpkd820 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.zunpkd820.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.zunpkd820.i64(i64)
+
+define i64 @zunpkd830(i64 %a) {
+; CHECK-LABEL: zunpkd830:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: zunpkd830 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.zunpkd830.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.zunpkd830.i64(i64)
+
+define i64 @zunpkd831(i64 %a) {
+; CHECK-LABEL: zunpkd831:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: zunpkd831 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.zunpkd831.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.zunpkd831.i64(i64)
+
+define i64 @zunpkd832(i64 %a) {
+; CHECK-LABEL: zunpkd832:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: zunpkd832 a0, a0
+; CHECK-NEXT: ret
+entry:
+  %0 = tail call i64 @llvm.riscv.zunpkd832.i64(i64 %a)
+  ret i64 %0
+}
+
+declare i64 @llvm.riscv.zunpkd832.i64(i64)